Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/apic.h | 4
-rw-r--r--  include/asm-x86/asm.h | 40
-rw-r--r--  include/asm-x86/atomic_64.h | 6
-rw-r--r--  include/asm-x86/bitops.h | 68
-rw-r--r--  include/asm-x86/cpufeature.h | 4
-rw-r--r--  include/asm-x86/current.h | 42
-rw-r--r--  include/asm-x86/current_32.h | 17
-rw-r--r--  include/asm-x86/current_64.h | 27
-rw-r--r--  include/asm-x86/e820.h | 1
-rw-r--r--  include/asm-x86/fixmap_32.h | 8
-rw-r--r--  include/asm-x86/gart.h | 63
-rw-r--r--  include/asm-x86/hardirq.h | 6
-rw-r--r--  include/asm-x86/i8259.h | 2
-rw-r--r--  include/asm-x86/io_apic.h | 13
-rw-r--r--  include/asm-x86/kvm_host.h | 4
-rw-r--r--  include/asm-x86/kvm_para.h | 33
-rw-r--r--  include/asm-x86/mach-default/smpboot_hooks.h | 4
-rw-r--r--  include/asm-x86/mmconfig.h | 12
-rw-r--r--  include/asm-x86/msr-index.h | 4
-rw-r--r--  include/asm-x86/msr.h | 2
-rw-r--r--  include/asm-x86/nmi.h | 27
-rw-r--r--  include/asm-x86/page.h | 11
-rw-r--r--  include/asm-x86/paravirt.h | 43
-rw-r--r--  include/asm-x86/pat.h | 8
-rw-r--r--  include/asm-x86/pci.h | 2
-rw-r--r--  include/asm-x86/pci_32.h | 14
-rw-r--r--  include/asm-x86/pgtable.h | 72
-rw-r--r--  include/asm-x86/processor-flags.h | 6
-rw-r--r--  include/asm-x86/processor.h | 6
-rw-r--r--  include/asm-x86/ptrace.h | 8
-rw-r--r--  include/asm-x86/pvclock-abi.h | 42
-rw-r--r--  include/asm-x86/pvclock.h | 13
-rw-r--r--  include/asm-x86/reboot.h | 2
-rw-r--r--  include/asm-x86/required-features.h | 6
-rw-r--r--  include/asm-x86/resume-trace.h | 2
-rw-r--r--  include/asm-x86/seccomp_32.h | 1
-rw-r--r--  include/asm-x86/seccomp_64.h | 1
-rw-r--r--  include/asm-x86/string_32.h | 323
-rw-r--r--  include/asm-x86/suspend_32.h | 5
-rw-r--r--  include/asm-x86/system.h | 2
-rw-r--r--  include/asm-x86/unistd_64.h | 2
-rw-r--r--  include/asm-x86/vm86.h | 11
-rw-r--r--  include/asm-x86/xen/hypercall.h | 11
-rw-r--r--  include/asm-x86/xen/page.h | 29
-rw-r--r--  include/asm-x86/xor_32.h | 5
-rw-r--r--  include/asm-x86/xor_64.h | 5
46 files changed, 636 insertions(+), 381 deletions(-)
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index be9639a9a18..313bcaf4b6c 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -36,14 +36,10 @@ extern void generic_apic_probe(void);
#ifdef CONFIG_X86_LOCAL_APIC
extern int apic_verbosity;
-extern int timer_over_8254;
extern int local_apic_timer_c2_ok;
-extern int local_apic_timer_disabled;
-extern int apic_runs_main_timer;
extern int ioapic_force;
extern int disable_apic;
-extern int disable_apic_timer;
/*
* Basic functions accessing APICs.
diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h
index 90dec0c2364..70939820c55 100644
--- a/include/asm-x86/asm.h
+++ b/include/asm-x86/asm.h
@@ -1,33 +1,29 @@
#ifndef _ASM_X86_ASM_H
#define _ASM_X86_ASM_H
-#ifdef CONFIG_X86_32
-/* 32 bits */
-
-# define _ASM_PTR " .long "
-# define _ASM_ALIGN " .balign 4 "
-# define _ASM_MOV_UL " movl "
-
-# define _ASM_INC " incl "
-# define _ASM_DEC " decl "
-# define _ASM_ADD " addl "
-# define _ASM_SUB " subl "
-# define _ASM_XADD " xaddl "
+#ifdef __ASSEMBLY__
+# define __ASM_FORM(x) x
+#else
+# define __ASM_FORM(x) " " #x " "
+#endif
+#ifdef CONFIG_X86_32
+# define __ASM_SEL(a,b) __ASM_FORM(a)
#else
-/* 64 bits */
+# define __ASM_SEL(a,b) __ASM_FORM(b)
+#endif
-# define _ASM_PTR " .quad "
-# define _ASM_ALIGN " .balign 8 "
-# define _ASM_MOV_UL " movq "
+#define __ASM_SIZE(inst) __ASM_SEL(inst##l, inst##q)
-# define _ASM_INC " incq "
-# define _ASM_DEC " decq "
-# define _ASM_ADD " addq "
-# define _ASM_SUB " subq "
-# define _ASM_XADD " xaddq "
+#define _ASM_PTR __ASM_SEL(.long, .quad)
+#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
+#define _ASM_MOV_UL __ASM_SIZE(mov)
-#endif /* CONFIG_X86_32 */
+#define _ASM_INC __ASM_SIZE(inc)
+#define _ASM_DEC __ASM_SIZE(dec)
+#define _ASM_ADD __ASM_SIZE(add)
+#define _ASM_SUB __ASM_SIZE(sub)
+#define _ASM_XADD __ASM_SIZE(xadd)
/* Exception table entry */
# define _ASM_EXTABLE(from,to) \
diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h
index 3e0cd7d3833..fe589c153db 100644
--- a/include/asm-x86/atomic_64.h
+++ b/include/asm-x86/atomic_64.h
@@ -11,12 +11,6 @@
* resource counting etc..
*/
-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
/*
* Make sure gcc doesn't try to be clever and move things around
* on us. We need to use _exactly_ the address the user gave us,
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index ee4b3ead6a4..96b1829cea1 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -23,11 +23,21 @@
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
versions. */
-#define ADDR "=m" (*(volatile long *) addr)
+#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
-#define ADDR "+m" (*(volatile long *) addr)
+#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif
+#define ADDR BITOP_ADDR(addr)
+
+/*
+ * We do the locked ops that don't return the old value as
+ * a mask operation on a byte.
+ */
+#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
+#define CONST_MASK(nr) (1 << ((nr) & 7))
+
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
@@ -43,9 +53,17 @@
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void set_bit(int nr, volatile void *addr)
+static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
{
- asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory");
+ if (IS_IMMEDIATE(nr)) {
+ asm volatile(LOCK_PREFIX "orb %1,%0"
+ : CONST_MASK_ADDR(nr, addr)
+ : "iq" ((u8)CONST_MASK(nr))
+ : "memory");
+ } else {
+ asm volatile(LOCK_PREFIX "bts %1,%0"
+ : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
+ }
}
/**
@@ -57,7 +75,7 @@ static inline void set_bit(int nr, volatile void *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static inline void __set_bit(int nr, volatile void *addr)
+static inline void __set_bit(int nr, volatile unsigned long *addr)
{
asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
@@ -72,9 +90,17 @@ static inline void __set_bit(int nr, volatile void *addr)
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
-static inline void clear_bit(int nr, volatile void *addr)
+static inline void clear_bit(int nr, volatile unsigned long *addr)
{
- asm volatile(LOCK_PREFIX "btr %1,%0" : ADDR : "Ir" (nr));
+ if (IS_IMMEDIATE(nr)) {
+ asm volatile(LOCK_PREFIX "andb %1,%0"
+ : CONST_MASK_ADDR(nr, addr)
+ : "iq" ((u8)~CONST_MASK(nr)));
+ } else {
+ asm volatile(LOCK_PREFIX "btr %1,%0"
+ : BITOP_ADDR(addr)
+ : "Ir" (nr));
+ }
}
/*
@@ -85,13 +111,13 @@ static inline void clear_bit(int nr, volatile void *addr)
* clear_bit() is atomic and implies release semantics before the memory
* operation. It can be used for an unlock.
*/
-static inline void clear_bit_unlock(unsigned nr, volatile void *addr)
+static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
barrier();
clear_bit(nr, addr);
}
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
@@ -108,7 +134,7 @@ static inline void __clear_bit(int nr, volatile void *addr)
* No memory barrier is required here, because x86 cannot reorder stores past
* older loads. Same principle as spin_unlock.
*/
-static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
+static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
barrier();
__clear_bit(nr, addr);
@@ -126,7 +152,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(int nr, volatile unsigned long *addr)
{
asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}
@@ -140,7 +166,7 @@ static inline void __change_bit(int nr, volatile void *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void change_bit(int nr, volatile void *addr)
+static inline void change_bit(int nr, volatile unsigned long *addr)
{
asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
}
@@ -153,7 +179,7 @@ static inline void change_bit(int nr, volatile void *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_set_bit(int nr, volatile void *addr)
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
@@ -170,7 +196,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
*
* This is the same as test_and_set_bit on x86.
*/
-static inline int test_and_set_bit_lock(int nr, volatile void *addr)
+static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
{
return test_and_set_bit(nr, addr);
}
@@ -184,7 +210,7 @@ static inline int test_and_set_bit_lock(int nr, volatile void *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static inline int __test_and_set_bit(int nr, volatile void *addr)
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
@@ -203,7 +229,7 @@ static inline int __test_and_set_bit(int nr, volatile void *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_clear_bit(int nr, volatile void *addr)
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
@@ -223,7 +249,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static inline int __test_and_clear_bit(int nr, volatile void *addr)
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
@@ -235,7 +261,7 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr)
}
/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
@@ -255,7 +281,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_change_bit(int nr, volatile void *addr)
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
@@ -266,13 +292,13 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
return oldbit;
}
-static inline int constant_test_bit(int nr, const volatile void *addr)
+static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr % BITS_PER_LONG)) &
(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
}
-static inline int variable_test_bit(int nr, volatile const void *addr)
+static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
{
int oldbit;
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 0d609c837a4..78b47e7404e 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -142,11 +142,11 @@ extern const char * const x86_power_flags[32];
#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability))
#define setup_clear_cpu_cap(bit) do { \
clear_cpu_cap(&boot_cpu_data, bit); \
- set_bit(bit, cleared_cpu_caps); \
+ set_bit(bit, (unsigned long *)cleared_cpu_caps); \
} while (0)
#define setup_force_cpu_cap(bit) do { \
set_cpu_cap(&boot_cpu_data, bit); \
- clear_bit(bit, cleared_cpu_caps); \
+ clear_bit(bit, (unsigned long *)cleared_cpu_caps); \
} while (0)
#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
diff --git a/include/asm-x86/current.h b/include/asm-x86/current.h
index d2526d3f734..7515c19d498 100644
--- a/include/asm-x86/current.h
+++ b/include/asm-x86/current.h
@@ -1,5 +1,39 @@
+#ifndef _X86_CURRENT_H
+#define _X86_CURRENT_H
+
#ifdef CONFIG_X86_32
-# include "current_32.h"
-#else
-# include "current_64.h"
-#endif
+#include <linux/compiler.h>
+#include <asm/percpu.h>
+
+struct task_struct;
+
+DECLARE_PER_CPU(struct task_struct *, current_task);
+static __always_inline struct task_struct *get_current(void)
+{
+ return x86_read_percpu(current_task);
+}
+
+#else /* X86_32 */
+
+#ifndef __ASSEMBLY__
+#include <asm/pda.h>
+
+struct task_struct;
+
+static __always_inline struct task_struct *get_current(void)
+{
+ return read_pda(pcurrent);
+}
+
+#else /* __ASSEMBLY__ */
+
+#include <asm/asm-offsets.h>
+#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* X86_32 */
+
+#define current get_current()
+
+#endif /* X86_CURRENT_H */
diff --git a/include/asm-x86/current_32.h b/include/asm-x86/current_32.h
deleted file mode 100644
index 5af9bdb97a1..00000000000
--- a/include/asm-x86/current_32.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _I386_CURRENT_H
-#define _I386_CURRENT_H
-
-#include <linux/compiler.h>
-#include <asm/percpu.h>
-
-struct task_struct;
-
-DECLARE_PER_CPU(struct task_struct *, current_task);
-static __always_inline struct task_struct *get_current(void)
-{
- return x86_read_percpu(current_task);
-}
-
-#define current get_current()
-
-#endif /* !(_I386_CURRENT_H) */
diff --git a/include/asm-x86/current_64.h b/include/asm-x86/current_64.h
deleted file mode 100644
index 2d368ede2fc..00000000000
--- a/include/asm-x86/current_64.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef _X86_64_CURRENT_H
-#define _X86_64_CURRENT_H
-
-#if !defined(__ASSEMBLY__)
-struct task_struct;
-
-#include <asm/pda.h>
-
-static inline struct task_struct *get_current(void)
-{
- struct task_struct *t = read_pda(pcurrent);
- return t;
-}
-
-#define current get_current()
-
-#else
-
-#ifndef ASM_OFFSET_H
-#include <asm/asm-offsets.h>
-#endif
-
-#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
-
-#endif
-
-#endif /* !(_X86_64_CURRENT_H) */
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h
index 7004251fc66..5103d0b2c46 100644
--- a/include/asm-x86/e820.h
+++ b/include/asm-x86/e820.h
@@ -24,6 +24,7 @@ struct e820map {
#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS 0x100000
+#define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
#define BIOS_BEGIN 0x000a0000
#define BIOS_END 0x00100000
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h
index 4b96148e90c..f0df7ee9681 100644
--- a/include/asm-x86/fixmap_32.h
+++ b/include/asm-x86/fixmap_32.h
@@ -79,10 +79,6 @@ enum fixed_addresses {
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#endif
-#ifdef CONFIG_ACPI
- FIX_ACPI_BEGIN,
- FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-#endif
#ifdef CONFIG_PCI_MMCONFIG
FIX_PCIE_MCFG,
#endif
@@ -103,6 +99,10 @@ enum fixed_addresses {
(__end_of_permanent_fixed_addresses & 511),
FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_NESTING - 1,
FIX_WP_TEST,
+#ifdef CONFIG_ACPI
+ FIX_ACPI_BEGIN,
+ FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
+#endif
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
FIX_OHCI1394_BASE,
#endif
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h
index 90958ed993f..eeca2f51fd8 100644
--- a/include/asm-x86/gart.h
+++ b/include/asm-x86/gart.h
@@ -1,16 +1,20 @@
#ifndef _ASM_X8664_IOMMU_H
#define _ASM_X8664_IOMMU_H 1
+#include <asm/e820.h>
+
extern void pci_iommu_shutdown(void);
extern void no_iommu_init(void);
extern int force_iommu, no_iommu;
extern int iommu_detected;
+extern int agp_amd64_init(void);
#ifdef CONFIG_GART_IOMMU
extern void gart_iommu_init(void);
extern void gart_iommu_shutdown(void);
extern void __init gart_parse_options(char *);
extern void early_gart_iommu_check(void);
extern void gart_iommu_hole_init(void);
+extern void set_up_gart_resume(u32, u32);
extern int fallback_aper_order;
extern int fallback_aper_force;
extern int gart_iommu_aperture;
@@ -31,4 +35,63 @@ static inline void gart_iommu_shutdown(void)
#endif
+/* PTE bits. */
+#define GPTE_VALID 1
+#define GPTE_COHERENT 2
+
+/* Aperture control register bits. */
+#define GARTEN (1<<0)
+#define DISGARTCPU (1<<4)
+#define DISGARTIO (1<<5)
+
+/* GART cache control register bits. */
+#define INVGART (1<<0)
+#define GARTPTEERR (1<<1)
+
+/* K8 On-cpu GART registers */
+#define AMD64_GARTAPERTURECTL 0x90
+#define AMD64_GARTAPERTUREBASE 0x94
+#define AMD64_GARTTABLEBASE 0x98
+#define AMD64_GARTCACHECTL 0x9c
+#define AMD64_GARTEN (1<<0)
+
+static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
+{
+ u32 tmp, ctl;
+
+ /* address of the mappings table */
+ addr >>= 12;
+ tmp = (u32) addr<<4;
+ tmp &= ~0xf;
+ pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
+
+ /* Enable GART translation for this hammer. */
+ pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
+ ctl |= GARTEN;
+ ctl &= ~(DISGARTCPU | DISGARTIO);
+ pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
+}
+
+static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
+{
+ if (!aper_base)
+ return 0;
+
+ if (aper_base + aper_size > 0x100000000ULL) {
+ printk(KERN_ERR "Aperture beyond 4GB. Ignoring.\n");
+ return 0;
+ }
+ if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
+ printk(KERN_ERR "Aperture pointing to e820 RAM. Ignoring.\n");
+ return 0;
+ }
+ if (aper_size < min_size) {
+ printk(KERN_ERR "Aperture too small (%d MB) than (%d MB)\n",
+ aper_size>>20, min_size>>20);
+ return 0;
+ }
+
+ return 1;
+}
+
#endif
diff --git a/include/asm-x86/hardirq.h b/include/asm-x86/hardirq.h
index 314434d664e..000787df66e 100644
--- a/include/asm-x86/hardirq.h
+++ b/include/asm-x86/hardirq.h
@@ -3,3 +3,9 @@
#else
# include "hardirq_64.h"
#endif
+
+extern u64 arch_irq_stat_cpu(unsigned int cpu);
+#define arch_irq_stat_cpu arch_irq_stat_cpu
+
+extern u64 arch_irq_stat(void);
+#define arch_irq_stat arch_irq_stat
diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h
index 45d4df3e51e..2f98df91f1f 100644
--- a/include/asm-x86/i8259.h
+++ b/include/asm-x86/i8259.h
@@ -55,4 +55,6 @@ static inline void outb_pic(unsigned char value, unsigned int port)
udelay(2);
}
+extern struct irq_chip i8259A_chip;
+
#endif /* __ASM_I8259_H__ */
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h
index d593e14f034..dc0f55f2b03 100644
--- a/include/asm-x86/io_apic.h
+++ b/include/asm-x86/io_apic.h
@@ -11,6 +11,15 @@
* Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
*/
+/* I/O Unit Redirection Table */
+#define IO_APIC_REDIR_VECTOR_MASK 0x000FF
+#define IO_APIC_REDIR_DEST_LOGICAL 0x00800
+#define IO_APIC_REDIR_DEST_PHYSICAL 0x00000
+#define IO_APIC_REDIR_SEND_PENDING (1 << 12)
+#define IO_APIC_REDIR_REMOTE_IRR (1 << 14)
+#define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15)
+#define IO_APIC_REDIR_MASKED (1 << 16)
+
/*
* The structure of the IO-APIC:
*/
@@ -137,6 +146,9 @@ extern int sis_apic_bug;
/* 1 if "noapic" boot option passed */
extern int skip_ioapic_setup;
+/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
+extern int timer_through_8259;
+
static inline void disable_ioapic_setup(void)
{
skip_ioapic_setup = 1;
@@ -162,6 +174,7 @@ extern void ioapic_init_mappings(void);
#else /* !CONFIG_X86_IO_APIC */
#define io_apic_assign_pci_irqs 0
+static const int timer_through_8259 = 0;
#endif
#endif
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 1d8cd01fa51..844f2a89afb 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -18,6 +18,7 @@
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
+#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#define KVM_MAX_VCPUS 16
@@ -282,7 +283,8 @@ struct kvm_vcpu_arch {
struct x86_emulate_ctxt emulate_ctxt;
gpa_t time;
- struct kvm_vcpu_time_info hv_clock;
+ struct pvclock_vcpu_time_info hv_clock;
+ unsigned int hv_clock_tsc_khz;
unsigned int time_offset;
struct page *time_page;
};
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h
index 50984594207..76f392146da 100644
--- a/include/asm-x86/kvm_para.h
+++ b/include/asm-x86/kvm_para.h
@@ -48,24 +48,6 @@ struct kvm_mmu_op_release_pt {
#ifdef __KERNEL__
#include <asm/processor.h>
-/* xen binary-compatible interface. See xen headers for details */
-struct kvm_vcpu_time_info {
- uint32_t version;
- uint32_t pad0;
- uint64_t tsc_timestamp;
- uint64_t system_time;
- uint32_t tsc_to_system_mul;
- int8_t tsc_shift;
- int8_t pad[3];
-} __attribute__((__packed__)); /* 32 bytes */
-
-struct kvm_wall_clock {
- uint32_t wc_version;
- uint32_t wc_sec;
- uint32_t wc_nsec;
-} __attribute__((__packed__));
-
-
extern void kvmclock_init(void);
@@ -89,7 +71,8 @@ static inline long kvm_hypercall0(unsigned int nr)
long ret;
asm volatile(KVM_HYPERCALL
: "=a"(ret)
- : "a"(nr));
+ : "a"(nr)
+ : "memory");
return ret;
}
@@ -98,7 +81,8 @@ static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
long ret;
asm volatile(KVM_HYPERCALL
: "=a"(ret)
- : "a"(nr), "b"(p1));
+ : "a"(nr), "b"(p1)
+ : "memory");
return ret;
}
@@ -108,7 +92,8 @@ static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
long ret;
asm volatile(KVM_HYPERCALL
: "=a"(ret)
- : "a"(nr), "b"(p1), "c"(p2));
+ : "a"(nr), "b"(p1), "c"(p2)
+ : "memory");
return ret;
}
@@ -118,7 +103,8 @@ static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
long ret;
asm volatile(KVM_HYPERCALL
: "=a"(ret)
- : "a"(nr), "b"(p1), "c"(p2), "d"(p3));
+ : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
+ : "memory");
return ret;
}
@@ -129,7 +115,8 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
long ret;
asm volatile(KVM_HYPERCALL
: "=a"(ret)
- : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4));
+ : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
+ : "memory");
return ret;
}
diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h
index 56d0e1fa025..b63c5218200 100644
--- a/include/asm-x86/mach-default/smpboot_hooks.h
+++ b/include/asm-x86/mach-default/smpboot_hooks.h
@@ -41,8 +41,10 @@ static inline void __init smpboot_setup_io_apic(void)
*/
if (!skip_ioapic_setup && nr_ioapics)
setup_IO_APIC();
- else
+ else {
nr_ioapics = 0;
+ localise_nmi_watchdog();
+ }
}
static inline void smpboot_clear_io_apic(void)
diff --git a/include/asm-x86/mmconfig.h b/include/asm-x86/mmconfig.h
new file mode 100644
index 00000000000..95beda07c6f
--- /dev/null
+++ b/include/asm-x86/mmconfig.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_MMCONFIG_H
+#define _ASM_MMCONFIG_H
+
+#ifdef CONFIG_PCI_MMCONFIG
+extern void __cpuinit fam10h_check_enable_mmcfg(void);
+extern void __init check_enable_amd_mmconf_dmi(void);
+#else
+static inline void fam10h_check_enable_mmcfg(void) { }
+static inline void check_enable_amd_mmconf_dmi(void) { }
+#endif
+
+#endif
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h
index 09413ad39d3..44bce773012 100644
--- a/include/asm-x86/msr-index.h
+++ b/include/asm-x86/msr-index.h
@@ -111,7 +111,9 @@
#define MSR_K8_TOP_MEM2 0xc001001d
#define MSR_K8_SYSCFG 0xc0010010
#define MSR_K8_HWCR 0xc0010015
-#define MSR_K8_ENABLE_C1E 0xc0010055
+#define MSR_K8_INT_PENDING_MSG 0xc0010055
+/* C1E active bits in int pending message */
+#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
#define MSR_K8_TSEG_ADDR 0xc0010112
#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index 3707650a169..2b5f2c91db2 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -18,7 +18,7 @@ static inline unsigned long long native_read_tscp(unsigned int *aux)
unsigned long low, high;
asm volatile(".byte 0x0f,0x01,0xf9"
: "=a" (low), "=d" (high), "=c" (*aux));
- return low | ((u64)high >> 32);
+ return low | ((u64)high << 32);
}
/*
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h
index 1e363021e72..05449ef830a 100644
--- a/include/asm-x86/nmi.h
+++ b/include/asm-x86/nmi.h
@@ -15,27 +15,6 @@
*/
int do_nmi_callback(struct pt_regs *regs, int cpu);
-#ifdef CONFIG_PM
-
-/** Replace the PM callback routine for NMI. */
-struct pm_dev *set_nmi_pm_callback(pm_callback callback);
-
-/** Unset the PM callback routine back to the default. */
-void unset_nmi_pm_callback(struct pm_dev *dev);
-
-#else
-
-static inline struct pm_dev *set_nmi_pm_callback(pm_callback callback)
-{
- return 0;
-}
-
-static inline void unset_nmi_pm_callback(struct pm_dev *dev)
-{
-}
-
-#endif /* CONFIG_PM */
-
#ifdef CONFIG_X86_64
extern void default_do_nmi(struct pt_regs *);
extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
@@ -46,7 +25,6 @@ extern void nmi_watchdog_default(void);
extern int check_nmi_watchdog(void);
extern int nmi_watchdog_enabled;
-extern int unknown_nmi_panic;
extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
extern int avail_to_resrv_perfctr_nmi(unsigned int);
extern int reserve_perfctr_nmi(unsigned int);
@@ -78,6 +56,11 @@ extern int unknown_nmi_panic;
void __trigger_all_cpu_backtrace(void);
#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+static inline void localise_nmi_watchdog(void)
+{
+ if (nmi_watchdog == NMI_IO_APIC)
+ nmi_watchdog = NMI_LOCAL_APIC;
+}
#endif
void lapic_watchdog_stop(void);
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index dc936dddf16..b52ed85f32f 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -51,8 +51,15 @@
#ifndef __ASSEMBLY__
+typedef struct { pgdval_t pgd; } pgd_t;
+typedef struct { pgprotval_t pgprot; } pgprot_t;
+
extern int page_is_ram(unsigned long pagenr);
extern int devmem_is_allowed(unsigned long pagenr);
+extern void map_devmem(unsigned long pfn, unsigned long size,
+ pgprot_t vma_prot);
+extern void unmap_devmem(unsigned long pfn, unsigned long size,
+ pgprot_t vma_prot);
extern unsigned long max_pfn_mapped;
@@ -74,9 +81,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-typedef struct { pgdval_t pgd; } pgd_t;
-typedef struct { pgprotval_t pgprot; } pgprot_t;
-
static inline pgd_t native_make_pgd(pgdval_t val)
{
return (pgd_t) { val };
@@ -160,6 +164,7 @@ static inline pteval_t native_pte_val(pte_t pte)
#endif
#define pte_val(x) native_pte_val(x)
+#define pte_flags(x) native_pte_val(x)
#define __pte(x) native_make_pte(x)
#endif /* CONFIG_PARAVIRT */
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 0f13b945e24..e9ada314dfc 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -238,7 +238,13 @@ struct pv_mmu_ops {
void (*pte_update_defer)(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
+ pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
+ void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+
pteval_t (*pte_val)(pte_t);
+ pteval_t (*pte_flags)(pte_t);
pte_t (*make_pte)(pteval_t pte);
pgdval_t (*pgd_val)(pgd_t);
@@ -996,6 +1002,20 @@ static inline pteval_t pte_val(pte_t pte)
return ret;
}
+static inline pteval_t pte_flags(pte_t pte)
+{
+ pteval_t ret;
+
+ if (sizeof(pteval_t) > sizeof(long))
+ ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
+ pte.pte, (u64)pte.pte >> 32);
+ else
+ ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
+ pte.pte);
+
+ return ret;
+}
+
static inline pgd_t __pgd(pgdval_t val)
{
pgdval_t ret;
@@ -1024,6 +1044,29 @@ static inline pgdval_t pgd_val(pgd_t pgd)
return ret;
}
+#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pteval_t ret;
+
+ ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
+ mm, addr, ptep);
+
+ return (pte_t) { .pte = ret };
+}
+
+static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ if (sizeof(pteval_t) > sizeof(long))
+ /* 5 arg words */
+ pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
+ else
+ PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
+ mm, addr, ptep, pte.pte);
+}
+
static inline void set_pte(pte_t *ptep, pte_t pte)
{
if (sizeof(pteval_t) > sizeof(long))
diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h
index 88f60cc6a22..7edc4730721 100644
--- a/include/asm-x86/pat.h
+++ b/include/asm-x86/pat.h
@@ -1,14 +1,13 @@
-
#ifndef _ASM_PAT_H
-#define _ASM_PAT_H 1
+#define _ASM_PAT_H
#include <linux/types.h>
#ifdef CONFIG_X86_PAT
-extern int pat_wc_enabled;
+extern int pat_enabled;
extern void validate_pat_support(struct cpuinfo_x86 *c);
#else
-static const int pat_wc_enabled = 0;
+static const int pat_enabled;
static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
#endif
@@ -21,4 +20,3 @@ extern int free_memtype(u64 start, u64 end);
extern void pat_disable(char *reason);
#endif
-
diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h
index 30bbde0cb34..2db14cf17db 100644
--- a/include/asm-x86/pci.h
+++ b/include/asm-x86/pci.h
@@ -18,6 +18,8 @@ struct pci_sysdata {
#endif
};
+extern int pci_routeirq;
+
/* scan a bus after allocating a pci_sysdata for it */
extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops,
int node);
diff --git a/include/asm-x86/pci_32.h b/include/asm-x86/pci_32.h
index 8c4c3a0368e..a50d4685128 100644
--- a/include/asm-x86/pci_32.h
+++ b/include/asm-x86/pci_32.h
@@ -18,12 +18,14 @@ struct pci_dev;
#define PCI_DMA_BUS_IS_PHYS (1)
/* pci_unmap_{page,single} is a nop so... */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-#define pci_unmap_addr(PTR, ADDR_NAME) (0)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
-#define pci_unmap_len(PTR, LEN_NAME) (0)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME[0];
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) unsigned LEN_NAME[0];
+#define pci_unmap_addr(PTR, ADDR_NAME) sizeof((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+ do { break; } while (pci_unmap_addr(PTR, ADDR_NAME))
+#define pci_unmap_len(PTR, LEN_NAME) sizeof((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
+ do { break; } while (pci_unmap_len(PTR, LEN_NAME))
#endif /* __KERNEL__ */
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 97c271b2910..bcb5446a08d 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -20,30 +20,25 @@
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
-/*
- * Note: we use _AC(1, L) instead of _AC(1, UL) so that we get a
- * sign-extended value on 32-bit with all 1's in the upper word,
- * which preserves the upper pte values on 64-bit ptes:
- */
-#define _PAGE_PRESENT (_AC(1, L)<<_PAGE_BIT_PRESENT)
-#define _PAGE_RW (_AC(1, L)<<_PAGE_BIT_RW)
-#define _PAGE_USER (_AC(1, L)<<_PAGE_BIT_USER)
-#define _PAGE_PWT (_AC(1, L)<<_PAGE_BIT_PWT)
-#define _PAGE_PCD (_AC(1, L)<<_PAGE_BIT_PCD)
-#define _PAGE_ACCESSED (_AC(1, L)<<_PAGE_BIT_ACCESSED)
-#define _PAGE_DIRTY (_AC(1, L)<<_PAGE_BIT_DIRTY)
-#define _PAGE_PSE (_AC(1, L)<<_PAGE_BIT_PSE) /* 2MB page */
-#define _PAGE_GLOBAL (_AC(1, L)<<_PAGE_BIT_GLOBAL) /* Global TLB entry */
-#define _PAGE_UNUSED1 (_AC(1, L)<<_PAGE_BIT_UNUSED1)
-#define _PAGE_UNUSED2 (_AC(1, L)<<_PAGE_BIT_UNUSED2)
-#define _PAGE_UNUSED3 (_AC(1, L)<<_PAGE_BIT_UNUSED3)
-#define _PAGE_PAT (_AC(1, L)<<_PAGE_BIT_PAT)
-#define _PAGE_PAT_LARGE (_AC(1, L)<<_PAGE_BIT_PAT_LARGE)
+#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
+#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
+#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER)
+#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
+#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
+#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
+#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
+#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
+#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
+#define _PAGE_UNUSED2 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED2)
+#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
+#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
+#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-#define _PAGE_NX (_AC(1, ULL) << _PAGE_BIT_NX)
+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
-#define _PAGE_NX 0
+#define _PAGE_NX (_AT(pteval_t, 0))
#endif
/* If _PAGE_PRESENT is clear, we use these: */
@@ -164,37 +159,37 @@ extern struct list_head pgd_list;
*/
static inline int pte_dirty(pte_t pte)
{
- return pte_val(pte) & _PAGE_DIRTY;
+ return pte_flags(pte) & _PAGE_DIRTY;
}
static inline int pte_young(pte_t pte)
{
- return pte_val(pte) & _PAGE_ACCESSED;
+ return pte_flags(pte) & _PAGE_ACCESSED;
}
static inline int pte_write(pte_t pte)
{
- return pte_val(pte) & _PAGE_RW;
+ return pte_flags(pte) & _PAGE_RW;
}
static inline int pte_file(pte_t pte)
{
- return pte_val(pte) & _PAGE_FILE;
+ return pte_flags(pte) & _PAGE_FILE;
}
static inline int pte_huge(pte_t pte)
{
- return pte_val(pte) & _PAGE_PSE;
+ return pte_flags(pte) & _PAGE_PSE;
}
static inline int pte_global(pte_t pte)
{
- return pte_val(pte) & _PAGE_GLOBAL;
+ return pte_flags(pte) & _PAGE_GLOBAL;
}
static inline int pte_exec(pte_t pte)
{
- return !(pte_val(pte) & _PAGE_NX);
+ return !(pte_flags(pte) & _PAGE_NX);
}
static inline int pte_special(pte_t pte)
@@ -210,22 +205,22 @@ static inline int pmd_large(pmd_t pte)
static inline pte_t pte_mkclean(pte_t pte)
{
- return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY);
+ return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}
static inline pte_t pte_mkold(pte_t pte)
{
- return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED);
+ return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}
static inline pte_t pte_wrprotect(pte_t pte)
{
- return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW);
+ return __pte(pte_val(pte) & ~_PAGE_RW);
}
static inline pte_t pte_mkexec(pte_t pte)
{
- return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX);
+ return __pte(pte_val(pte) & ~_PAGE_NX);
}
static inline pte_t pte_mkdirty(pte_t pte)
@@ -250,7 +245,7 @@ static inline pte_t pte_mkhuge(pte_t pte)
static inline pte_t pte_clrhuge(pte_t pte)
{
- return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE);
+ return __pte(pte_val(pte) & ~_PAGE_PSE);
}
static inline pte_t pte_mkglobal(pte_t pte)
@@ -260,7 +255,7 @@ static inline pte_t pte_mkglobal(pte_t pte)
static inline pte_t pte_clrglobal(pte_t pte)
{
- return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL);
+ return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
}
static inline pte_t pte_mkspecial(pte_t pte)
@@ -305,7 +300,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
return __pgprot(preservebits | addbits);
}
-#define pte_pgprot(x) __pgprot(pte_val(x) & ~PTE_MASK)
+#define pte_pgprot(x) __pgprot(pte_flags(x) & ~PTE_MASK)
#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
@@ -369,8 +364,15 @@ enum {
PG_LEVEL_4K,
PG_LEVEL_2M,
PG_LEVEL_1G,
+ PG_LEVEL_NUM
};
+#ifdef CONFIG_PROC_FS
+extern void update_page_count(int level, unsigned long pages);
+#else
+static inline void update_page_count(int level, unsigned long pages) { }
+#endif
+
/*
* Helper function that returns the kernel pagetable entry controlling
* the virtual address 'address'. NULL means no pagetable entry present.
diff --git a/include/asm-x86/processor-flags.h b/include/asm-x86/processor-flags.h
index 199cab107d8..092b39b3a7e 100644
--- a/include/asm-x86/processor-flags.h
+++ b/include/asm-x86/processor-flags.h
@@ -88,4 +88,10 @@
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc
+#ifdef CONFIG_VM86
+#define X86_VM_MASK X86_EFLAGS_VM
+#else
+#define X86_VM_MASK 0 /* No VM86 support */
+#endif
+
#endif /* __ASM_I386_PROCESSOR_FLAGS_H */
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 559105220a4..4ab2ede6f4b 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -263,15 +263,11 @@ struct tss_struct {
struct thread_struct *io_bitmap_owner;
/*
- * Pad the TSS to be cacheline-aligned (size is 0x100):
- */
- unsigned long __cacheline_filler[35];
- /*
* .. and then another 0x100 bytes for the emergency kernel stack:
*/
unsigned long stack[64];
-} __attribute__((packed));
+} ____cacheline_aligned;
DECLARE_PER_CPU(struct tss_struct, init_tss);
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index 9f922b0b95d..8a71db803da 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -3,7 +3,12 @@
#include <linux/compiler.h> /* For __user */
#include <asm/ptrace-abi.h>
+#include <asm/processor-flags.h>
+#ifdef __KERNEL__
+#include <asm/ds.h> /* the DS BTS struct is used for ptrace too */
+#include <asm/segment.h>
+#endif
#ifndef __ASSEMBLY__
@@ -55,9 +60,6 @@ struct pt_regs {
unsigned long ss;
};
-#include <asm/vm86.h>
-#include <asm/segment.h>
-
#endif /* __KERNEL__ */
#else /* __i386__ */
diff --git a/include/asm-x86/pvclock-abi.h b/include/asm-x86/pvclock-abi.h
new file mode 100644
index 00000000000..6857f840b24
--- /dev/null
+++ b/include/asm-x86/pvclock-abi.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_X86_PVCLOCK_ABI_H_
+#define _ASM_X86_PVCLOCK_ABI_H_
+#ifndef __ASSEMBLY__
+
+/*
+ * These structs MUST NOT be changed.
+ * They are the ABI between hypervisor and guest OS.
+ * Both Xen and KVM are using this.
+ *
+ * pvclock_vcpu_time_info holds the system time and the tsc timestamp
+ * of the last update. So the guest can use the tsc delta to get a
+ * more precise system time. There is one per virtual cpu.
+ *
+ * pvclock_wall_clock references the point in time when the system
+ * time was zero (usually boot time), thus the guest calculates the
+ * current wall clock by adding the system time.
+ *
+ * Protocol for the "version" fields is: hypervisor raises it (making
+ * it uneven) before it starts updating the fields and raises it again
+ * (making it even) when it is done. Thus the guest can make sure the
+ * time values it got are consistent by checking the version before
+ * and after reading them.
+ */
+
+struct pvclock_vcpu_time_info {
+ u32 version;
+ u32 pad0;
+ u64 tsc_timestamp;
+ u64 system_time;
+ u32 tsc_to_system_mul;
+ s8 tsc_shift;
+ u8 pad[3];
+} __attribute__((__packed__)); /* 32 bytes */
+
+struct pvclock_wall_clock {
+ u32 version;
+ u32 sec;
+ u32 nsec;
+} __attribute__((__packed__));
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_PVCLOCK_ABI_H_ */
diff --git a/include/asm-x86/pvclock.h b/include/asm-x86/pvclock.h
new file mode 100644
index 00000000000..85b1bba8e0a
--- /dev/null
+++ b/include/asm-x86/pvclock.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_X86_PVCLOCK_H_
+#define _ASM_X86_PVCLOCK_H_
+
+#include <linux/clocksource.h>
+#include <asm/pvclock-abi.h>
+
+/* some helper functions for xen and kvm pv clock sources */
+cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
+ struct pvclock_vcpu_time_info *vcpu,
+ struct timespec *ts);
+
+#endif /* _ASM_X86_PVCLOCK_H_ */
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h
index e63741f1939..206f355786d 100644
--- a/include/asm-x86/reboot.h
+++ b/include/asm-x86/reboot.h
@@ -14,8 +14,8 @@ struct machine_ops {
extern struct machine_ops machine_ops;
-void machine_real_restart(unsigned char *code, int length);
void native_machine_crash_shutdown(struct pt_regs *regs);
void native_machine_shutdown(void);
+void machine_real_restart(const unsigned char *code, int length);
#endif /* _ASM_REBOOT_H */
diff --git a/include/asm-x86/required-features.h b/include/asm-x86/required-features.h
index 7400d3ad75c..8c387198ca8 100644
--- a/include/asm-x86/required-features.h
+++ b/include/asm-x86/required-features.h
@@ -19,9 +19,13 @@
#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
# define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
-# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
#else
# define NEED_PAE 0
+#endif
+
+#ifdef CONFIG_X86_CMPXCHG64
+# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
+#else
# define NEED_CX8 0
#endif
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h
index 2557514d7ef..8d9f0b41ee8 100644
--- a/include/asm-x86/resume-trace.h
+++ b/include/asm-x86/resume-trace.h
@@ -6,7 +6,7 @@
#define TRACE_RESUME(user) \
do { \
if (pm_trace_enabled) { \
- void *tracedata; \
+ const void *tracedata; \
asm volatile(_ASM_MOV_UL " $1f,%0\n" \
".section .tracedata,\"a\"\n" \
"1:\t.word %c1\n\t" \
diff --git a/include/asm-x86/seccomp_32.h b/include/asm-x86/seccomp_32.h
index 18da19e89bf..36e71c5f306 100644
--- a/include/asm-x86/seccomp_32.h
+++ b/include/asm-x86/seccomp_32.h
@@ -1,4 +1,5 @@
#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
#include <linux/thread_info.h>
diff --git a/include/asm-x86/seccomp_64.h b/include/asm-x86/seccomp_64.h
index 553af65a228..76cfe69aa63 100644
--- a/include/asm-x86/seccomp_64.h
+++ b/include/asm-x86/seccomp_64.h
@@ -1,4 +1,5 @@
#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
#include <linux/thread_info.h>
diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h
index b49369ad9a6..193578cd1fd 100644
--- a/include/asm-x86/string_32.h
+++ b/include/asm-x86/string_32.h
@@ -29,81 +29,116 @@ extern char *strchr(const char *s, int c);
#define __HAVE_ARCH_STRLEN
extern size_t strlen(const char *s);
-static __always_inline void * __memcpy(void * to, const void * from, size_t n)
+static __always_inline void *__memcpy(void *to, const void *from, size_t n)
{
-int d0, d1, d2;
-__asm__ __volatile__(
- "rep ; movsl\n\t"
- "movl %4,%%ecx\n\t"
- "andl $3,%%ecx\n\t"
- "jz 1f\n\t"
- "rep ; movsb\n\t"
- "1:"
- : "=&c" (d0), "=&D" (d1), "=&S" (d2)
- : "0" (n/4), "g" (n), "1" ((long) to), "2" ((long) from)
- : "memory");
-return (to);
+ int d0, d1, d2;
+ asm volatile("rep ; movsl\n\t"
+ "movl %4,%%ecx\n\t"
+ "andl $3,%%ecx\n\t"
+ "jz 1f\n\t"
+ "rep ; movsb\n\t"
+ "1:"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
+ : "memory");
+ return to;
}
/*
* This looks ugly, but the compiler can optimize it totally,
* as the count is constant.
*/
-static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n)
+static __always_inline void *__constant_memcpy(void *to, const void *from,
+ size_t n)
{
long esi, edi;
- if (!n) return to;
-#if 1 /* want to do small copies with non-string ops? */
+ if (!n)
+ return to;
+
switch (n) {
- case 1: *(char*)to = *(char*)from; return to;
- case 2: *(short*)to = *(short*)from; return to;
- case 4: *(int*)to = *(int*)from; return to;
-#if 1 /* including those doable with two moves? */
- case 3: *(short*)to = *(short*)from;
- *((char*)to+2) = *((char*)from+2); return to;
- case 5: *(int*)to = *(int*)from;
- *((char*)to+4) = *((char*)from+4); return to;
- case 6: *(int*)to = *(int*)from;
- *((short*)to+2) = *((short*)from+2); return to;
- case 8: *(int*)to = *(int*)from;
- *((int*)to+1) = *((int*)from+1); return to;
-#endif
+ case 1:
+ *(char *)to = *(char *)from;
+ return to;
+ case 2:
+ *(short *)to = *(short *)from;
+ return to;
+ case 4:
+ *(int *)to = *(int *)from;
+ return to;
+
+ case 3:
+ *(short *)to = *(short *)from;
+ *((char *)to + 2) = *((char *)from + 2);
+ return to;
+ case 5:
+ *(int *)to = *(int *)from;
+ *((char *)to + 4) = *((char *)from + 4);
+ return to;
+ case 6:
+ *(int *)to = *(int *)from;
+ *((short *)to + 2) = *((short *)from + 2);
+ return to;
+ case 8:
+ *(int *)to = *(int *)from;
+ *((int *)to + 1) = *((int *)from + 1);
+ return to;
}
-#endif
- esi = (long) from;
- edi = (long) to;
- if (n >= 5*4) {
+
+ esi = (long)from;
+ edi = (long)to;
+ if (n >= 5 * 4) {
/* large block: use rep prefix */
int ecx;
- __asm__ __volatile__(
- "rep ; movsl"
- : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
- : "0" (n/4), "1" (edi),"2" (esi)
- : "memory"
+ asm volatile("rep ; movsl"
+ : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
+ : "0" (n / 4), "1" (edi), "2" (esi)
+ : "memory"
);
} else {
/* small block: don't clobber ecx + smaller code */
- if (n >= 4*4) __asm__ __volatile__("movsl"
- :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
- if (n >= 3*4) __asm__ __volatile__("movsl"
- :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
- if (n >= 2*4) __asm__ __volatile__("movsl"
- :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
- if (n >= 1*4) __asm__ __volatile__("movsl"
- :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
+ if (n >= 4 * 4)
+ asm volatile("movsl"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
+ if (n >= 3 * 4)
+ asm volatile("movsl"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
+ if (n >= 2 * 4)
+ asm volatile("movsl"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
+ if (n >= 1 * 4)
+ asm volatile("movsl"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
}
switch (n % 4) {
/* tail */
- case 0: return to;
- case 1: __asm__ __volatile__("movsb"
- :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
- return to;
- case 2: __asm__ __volatile__("movsw"
- :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
- return to;
- default: __asm__ __volatile__("movsw\n\tmovsb"
- :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory");
- return to;
+ case 0:
+ return to;
+ case 1:
+ asm volatile("movsb"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
+ return to;
+ case 2:
+ asm volatile("movsw"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
+ return to;
+ default:
+ asm volatile("movsw\n\tmovsb"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
+ return to;
}
}
@@ -117,87 +152,86 @@ static __always_inline void * __constant_memcpy(void * to, const void * from, si
* This CPU favours 3DNow strongly (eg AMD Athlon)
*/
-static inline void * __constant_memcpy3d(void * to, const void * from, size_t len)
+static inline void *__constant_memcpy3d(void *to, const void *from, size_t len)
{
if (len < 512)
return __constant_memcpy(to, from, len);
return _mmx_memcpy(to, from, len);
}
-static __inline__ void *__memcpy3d(void *to, const void *from, size_t len)
+static inline void *__memcpy3d(void *to, const void *from, size_t len)
{
if (len < 512)
return __memcpy(to, from, len);
return _mmx_memcpy(to, from, len);
}
-#define memcpy(t, f, n) \
-(__builtin_constant_p(n) ? \
- __constant_memcpy3d((t),(f),(n)) : \
- __memcpy3d((t),(f),(n)))
+#define memcpy(t, f, n) \
+ (__builtin_constant_p((n)) \
+ ? __constant_memcpy3d((t), (f), (n)) \
+ : __memcpy3d((t), (f), (n)))
#else
/*
* No 3D Now!
*/
-
-#define memcpy(t, f, n) \
-(__builtin_constant_p(n) ? \
- __constant_memcpy((t),(f),(n)) : \
- __memcpy((t),(f),(n)))
+
+#define memcpy(t, f, n) \
+ (__builtin_constant_p((n)) \
+ ? __constant_memcpy((t), (f), (n)) \
+ : __memcpy((t), (f), (n)))
#endif
#define __HAVE_ARCH_MEMMOVE
-void *memmove(void * dest,const void * src, size_t n);
+void *memmove(void *dest, const void *src, size_t n);
#define memcmp __builtin_memcmp
#define __HAVE_ARCH_MEMCHR
-extern void *memchr(const void * cs,int c,size_t count);
+extern void *memchr(const void *cs, int c, size_t count);
-static inline void * __memset_generic(void * s, char c,size_t count)
+static inline void *__memset_generic(void *s, char c, size_t count)
{
-int d0, d1;
-__asm__ __volatile__(
- "rep\n\t"
- "stosb"
- : "=&c" (d0), "=&D" (d1)
- :"a" (c),"1" (s),"0" (count)
- :"memory");
-return s;
+ int d0, d1;
+ asm volatile("rep\n\t"
+ "stosb"
+ : "=&c" (d0), "=&D" (d1)
+ : "a" (c), "1" (s), "0" (count)
+ : "memory");
+ return s;
}
/* we might want to write optimized versions of these later */
-#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count))
+#define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count))
/*
- * memset(x,0,y) is a reasonably common thing to do, so we want to fill
+ * memset(x, 0, y) is a reasonably common thing to do, so we want to fill
* things 32 bits at a time even when we don't know the size of the
* area at compile-time..
*/
-static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+static __always_inline
+void *__constant_c_memset(void *s, unsigned long c, size_t count)
{
-int d0, d1;
-__asm__ __volatile__(
- "rep ; stosl\n\t"
- "testb $2,%b3\n\t"
- "je 1f\n\t"
- "stosw\n"
- "1:\ttestb $1,%b3\n\t"
- "je 2f\n\t"
- "stosb\n"
- "2:"
- :"=&c" (d0), "=&D" (d1)
- :"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
- :"memory");
-return (s);
+ int d0, d1;
+ asm volatile("rep ; stosl\n\t"
+ "testb $2,%b3\n\t"
+ "je 1f\n\t"
+ "stosw\n"
+ "1:\ttestb $1,%b3\n\t"
+ "je 2f\n\t"
+ "stosb\n"
+ "2:"
+ : "=&c" (d0), "=&D" (d1)
+ : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
+ : "memory");
+ return s;
}
/* Added by Gertjan van Wingerde to make minix and sysv module work */
#define __HAVE_ARCH_STRNLEN
-extern size_t strnlen(const char * s, size_t count);
+extern size_t strnlen(const char *s, size_t count);
/* end of additional stuff */
#define __HAVE_ARCH_STRSTR
@@ -207,66 +241,85 @@ extern char *strstr(const char *cs, const char *ct);
* This looks horribly ugly, but the compiler can optimize it totally,
* as we by now know that both pattern and count is constant..
*/
-static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+static __always_inline
+void *__constant_c_and_count_memset(void *s, unsigned long pattern,
+ size_t count)
{
switch (count) {
+ case 0:
+ return s;
+ case 1:
+ *(unsigned char *)s = pattern & 0xff;
+ return s;
+ case 2:
+ *(unsigned short *)s = pattern & 0xffff;
+ return s;
+ case 3:
+ *(unsigned short *)s = pattern & 0xffff;
+ *((unsigned char *)s + 2) = pattern & 0xff;
+ return s;
+ case 4:
+ *(unsigned long *)s = pattern;
+ return s;
+ }
+
+#define COMMON(x) \
+ asm volatile("rep ; stosl" \
+ x \
+ : "=&c" (d0), "=&D" (d1) \
+ : "a" (eax), "0" (count/4), "1" ((long)s) \
+ : "memory")
+
+ {
+ int d0, d1;
+#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
+ /* Workaround for broken gcc 4.0 */
+ register unsigned long eax asm("%eax") = pattern;
+#else
+ unsigned long eax = pattern;
+#endif
+
+ switch (count % 4) {
case 0:
+ COMMON("");
return s;
case 1:
- *(unsigned char *)s = pattern & 0xff;
+ COMMON("\n\tstosb");
return s;
case 2:
- *(unsigned short *)s = pattern & 0xffff;
+ COMMON("\n\tstosw");
return s;
- case 3:
- *(unsigned short *)s = pattern & 0xffff;
- *(2+(unsigned char *)s) = pattern & 0xff;
- return s;
- case 4:
- *(unsigned long *)s = pattern;
+ default:
+ COMMON("\n\tstosw\n\tstosb");
return s;
+ }
}
-#define COMMON(x) \
-__asm__ __volatile__( \
- "rep ; stosl" \
- x \
- : "=&c" (d0), "=&D" (d1) \
- : "a" (pattern),"0" (count/4),"1" ((long) s) \
- : "memory")
-{
- int d0, d1;
- switch (count % 4) {
- case 0: COMMON(""); return s;
- case 1: COMMON("\n\tstosb"); return s;
- case 2: COMMON("\n\tstosw"); return s;
- default: COMMON("\n\tstosw\n\tstosb"); return s;
- }
-}
-
+
#undef COMMON
}
-#define __constant_c_x_memset(s, c, count) \
-(__builtin_constant_p(count) ? \
- __constant_c_and_count_memset((s),(c),(count)) : \
- __constant_c_memset((s),(c),(count)))
+#define __constant_c_x_memset(s, c, count) \
+ (__builtin_constant_p(count) \
+ ? __constant_c_and_count_memset((s), (c), (count)) \
+ : __constant_c_memset((s), (c), (count)))
-#define __memset(s, c, count) \
-(__builtin_constant_p(count) ? \
- __constant_count_memset((s),(c),(count)) : \
- __memset_generic((s),(c),(count)))
+#define __memset(s, c, count) \
+ (__builtin_constant_p(count) \
+ ? __constant_count_memset((s), (c), (count)) \
+ : __memset_generic((s), (c), (count)))
#define __HAVE_ARCH_MEMSET
-#define memset(s, c, count) \
-(__builtin_constant_p(c) ? \
- __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \
- __memset((s),(c),(count)))
+#define memset(s, c, count) \
+ (__builtin_constant_p(c) \
+ ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
+ (count)) \
+ : __memset((s), (c), (count)))
/*
* find the first occurrence of byte 'c', or 1 past the area if none
*/
#define __HAVE_ARCH_MEMSCAN
-extern void *memscan(void * addr, int c, size_t size);
+extern void *memscan(void *addr, int c, size_t size);
#endif /* __KERNEL__ */
diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h
index 24e1c080aa8..8675c6782a7 100644
--- a/include/asm-x86/suspend_32.h
+++ b/include/asm-x86/suspend_32.h
@@ -3,6 +3,9 @@
* Based on code
* Copyright 2001 Patrick Mochel <mochel@osdl.org>
*/
+#ifndef __ASM_X86_32_SUSPEND_H
+#define __ASM_X86_32_SUSPEND_H
+
#include <asm/desc.h>
#include <asm/i387.h>
@@ -44,3 +47,5 @@ static inline void acpi_save_register_state(unsigned long return_point)
/* routines for saving/restoring kernel state */
extern int acpi_save_state_mem(void);
#endif
+
+#endif /* __ASM_X86_32_SUSPEND_H */
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index a2f04cd79b2..7e4c133795a 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -289,7 +289,7 @@ static inline void native_wbinvd(void)
#endif/* CONFIG_PARAVIRT */
-#define stts() write_cr0(8 | read_cr0())
+#define stts() write_cr0(read_cr0() | X86_CR0_TS)
#endif /* __KERNEL__ */
diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h
index fe26e36d0f5..9c1a4a3470d 100644
--- a/include/asm-x86/unistd_64.h
+++ b/include/asm-x86/unistd_64.h
@@ -290,7 +290,7 @@ __SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait)
#define __NR_rt_sigqueueinfo 129
__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo)
#define __NR_rt_sigsuspend 130
-__SYSCALL(__NR_rt_sigsuspend, stub_rt_sigsuspend)
+__SYSCALL(__NR_rt_sigsuspend, sys_rt_sigsuspend)
#define __NR_sigaltstack 131
__SYSCALL(__NR_sigaltstack, stub_sigaltstack)
#define __NR_utime 132
diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h
index 074b357146d..5ce351325e0 100644
--- a/include/asm-x86/vm86.h
+++ b/include/asm-x86/vm86.h
@@ -14,12 +14,6 @@
#include <asm/processor-flags.h>
-#ifdef CONFIG_VM86
-#define X86_VM_MASK X86_EFLAGS_VM
-#else
-#define X86_VM_MASK 0 /* No VM86 support */
-#endif
-
#define BIOSSEG 0x0f000
#define CPU_086 0
@@ -121,7 +115,6 @@ struct vm86plus_info_struct {
unsigned long is_vm86pus:1; /* for vm86 internal use */
unsigned char vm86dbg_intxxtab[32]; /* for debugger */
};
-
struct vm86plus_struct {
struct vm86_regs regs;
unsigned long flags;
@@ -133,6 +126,9 @@ struct vm86plus_struct {
};
#ifdef __KERNEL__
+
+#include <asm/ptrace.h>
+
/*
* This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86
* mode - the main change is that the old segment descriptors aren't
@@ -141,7 +137,6 @@ struct vm86plus_struct {
* at the end of the structure. Look at ptrace.h to see the "normal"
* setup. For user space layout see 'struct vm86_regs' above.
*/
-#include <asm/ptrace.h>
struct kernel_vm86_regs {
/*
diff --git a/include/asm-x86/xen/hypercall.h b/include/asm-x86/xen/hypercall.h
index c2ccd997ed3..2a4f9b41d68 100644
--- a/include/asm-x86/xen/hypercall.h
+++ b/include/asm-x86/xen/hypercall.h
@@ -176,9 +176,9 @@ HYPERVISOR_fpu_taskswitch(int set)
}
static inline int
-HYPERVISOR_sched_op(int cmd, unsigned long arg)
+HYPERVISOR_sched_op(int cmd, void *arg)
{
- return _hypercall2(int, sched_op, cmd, arg);
+ return _hypercall2(int, sched_op_new, cmd, arg);
}
static inline long
@@ -315,6 +315,13 @@ HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
}
static inline void
+MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
+{
+ mcl->op = __HYPERVISOR_fpu_taskswitch;
+ mcl->args[0] = set;
+}
+
+static inline void
MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
pte_t new_val, unsigned long flags)
{
diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h
index baf3a4dce28..377c04591c1 100644
--- a/include/asm-x86/xen/page.h
+++ b/include/asm-x86/xen/page.h
@@ -26,15 +26,20 @@ typedef struct xpaddr {
#define FOREIGN_FRAME_BIT (1UL<<31)
#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
-extern unsigned long *phys_to_machine_mapping;
+/* Maximum amount of memory we can handle in a domain in pages */
+#define MAX_DOMAIN_PAGES \
+ ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
+
+
+extern unsigned long get_phys_to_machine(unsigned long pfn);
+extern void set_phys_to_machine(unsigned long pfn, unsigned long mfn);
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
if (xen_feature(XENFEAT_auto_translated_physmap))
return pfn;
- return phys_to_machine_mapping[(unsigned int)(pfn)] &
- ~FOREIGN_FRAME_BIT;
+ return get_phys_to_machine(pfn) & ~FOREIGN_FRAME_BIT;
}
static inline int phys_to_machine_mapping_valid(unsigned long pfn)
@@ -42,7 +47,7 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
if (xen_feature(XENFEAT_auto_translated_physmap))
return 1;
- return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
+ return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
}
static inline unsigned long mfn_to_pfn(unsigned long mfn)
@@ -106,20 +111,12 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
unsigned long pfn = mfn_to_pfn(mfn);
if ((pfn < max_mapnr)
&& !xen_feature(XENFEAT_auto_translated_physmap)
- && (phys_to_machine_mapping[pfn] != mfn))
+ && (get_phys_to_machine(pfn) != mfn))
return max_mapnr; /* force !pfn_valid() */
+ /* XXX fixme; not true with sparsemem */
return pfn;
}
-static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
- if (xen_feature(XENFEAT_auto_translated_physmap)) {
- BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
- return;
- }
- phys_to_machine_mapping[pfn] = mfn;
-}
-
/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
#define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v))))
@@ -150,13 +147,9 @@ static inline pte_t __pte_ma(pteval_t x)
return (pte_t) { .pte = x };
}
-#ifdef CONFIG_X86_PAE
#define pmd_val_ma(v) ((v).pmd)
#define pud_val_ma(v) ((v).pgd.pgd)
#define __pmd_ma(x) ((pmd_t) { (x) } )
-#else /* !X86_PAE */
-#define pmd_val_ma(v) ((v).pud.pgd.pgd)
-#endif /* CONFIG_X86_PAE */
#define pgd_val_ma(x) ((x).pgd)
diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h
index 067b5c1835a..921b4584044 100644
--- a/include/asm-x86/xor_32.h
+++ b/include/asm-x86/xor_32.h
@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_32_H
+#define ASM_X86__XOR_32_H
+
/*
* Optimized RAID-5 checksumming functions for MMX and SSE.
*
@@ -881,3 +884,5 @@ do { \
deals with a load to a line that is being prefetched. */
#define XOR_SELECT_TEMPLATE(FASTEST) \
(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
+
+#endif /* ASM_X86__XOR_32_H */
diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h
index 24957e39ac8..2d3a18de295 100644
--- a/include/asm-x86/xor_64.h
+++ b/include/asm-x86/xor_64.h
@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_64_H
+#define ASM_X86__XOR_64_H
+
/*
* Optimized RAID-5 checksumming functions for MMX and SSE.
*
@@ -354,3 +357,5 @@ do { \
We may also be able to load into the L1 only depending on how the cpu
deals with a load to a line that is being prefetched. */
#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
+
+#endif /* ASM_X86__XOR_64_H */