Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/elf.h          |  4
-rw-r--r--  include/asm-x86/es7000/apic.h  |  2
-rw-r--r--  include/asm-x86/iommu.h        |  2
-rw-r--r--  include/asm-x86/kvm.h          | 22
-rw-r--r--  include/asm-x86/kvm_host.h     | 82
-rw-r--r--  include/asm-x86/msr-index.h    |  3
-rw-r--r--  include/asm-x86/page_32.h      |  2
-rw-r--r--  include/asm-x86/page_64.h      |  1
-rw-r--r--  include/asm-x86/pgtable.h      |  7
-rw-r--r--  include/asm-x86/pvclock.h      |  1
-rw-r--r--  include/asm-x86/summit/apic.h  |  2
-rw-r--r--  include/asm-x86/xen/page.h     |  4
12 files changed, 65 insertions(+), 67 deletions(-)
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
index 5c4745bec90..26bc15f01e7 100644
--- a/include/asm-x86/elf.h
+++ b/include/asm-x86/elf.h
@@ -186,7 +186,7 @@ do { \
set_fs(USER_DS); \
} while (0)
-#define COMPAT_SET_PERSONALITY(ex, ibcs2) \
+#define COMPAT_SET_PERSONALITY(ex) \
do { \
if (test_thread_flag(TIF_IA32)) \
clear_thread_flag(TIF_ABI_PENDING); \
@@ -267,7 +267,7 @@ extern int force_personality32;
For the moment, we have only optimizations for the Intel generations,
but that could change... */
-#define SET_PERSONALITY(ex, ibcs2) set_personality_64bit()
+#define SET_PERSONALITY(ex) set_personality_64bit()
/*
* An executable for which elf_read_implies_exec() returns TRUE will
diff --git a/include/asm-x86/es7000/apic.h b/include/asm-x86/es7000/apic.h
index 750afada5fb..380f0b4f17e 100644
--- a/include/asm-x86/es7000/apic.h
+++ b/include/asm-x86/es7000/apic.h
@@ -170,7 +170,7 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
int new_apicid = cpu_to_logical_apicid(cpu);
if (apicid_cluster(apicid) !=
apicid_cluster(new_apicid)){
- printk ("%s: Not a valid mask!\n",__FUNCTION__);
+ printk ("%s: Not a valid mask!\n", __func__);
#if defined CONFIG_ES7000_CLUSTERED_APIC
return 0xFF;
#else
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index 546ad3110fe..961e746da97 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -8,7 +8,7 @@ extern int force_iommu, no_iommu;
extern int iommu_detected;
extern int dmar_disabled;
-extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len);
+extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
#ifdef CONFIG_GART_IOMMU
extern int gart_iommu_aperture;
diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h
index 78e954db1e7..ba0dd791fad 100644
--- a/include/asm-x86/kvm.h
+++ b/include/asm-x86/kvm.h
@@ -208,26 +208,4 @@ struct kvm_pit_channel_state {
struct kvm_pit_state {
struct kvm_pit_channel_state channels[3];
};
-
-#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
-#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
-#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
-#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
-#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
-#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
-#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
-#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
-#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
-#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
-#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
-#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
-#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
-#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
-#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
-#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
-#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
-#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
-#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
-#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)
-
#endif /* ASM_X86__KVM_H */
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 69794547f51..411fb8cfb24 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -57,6 +57,10 @@
#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
#define DE_VECTOR 0
+#define DB_VECTOR 1
+#define BP_VECTOR 3
+#define OF_VECTOR 4
+#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
@@ -65,6 +69,7 @@
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
+#define MF_VECTOR 16
#define MC_VECTOR 18
#define SELECTOR_TI_MASK (1 << 2)
@@ -89,7 +94,7 @@ extern struct list_head vm_list;
struct kvm_vcpu;
struct kvm;
-enum {
+enum kvm_reg {
VCPU_REGS_RAX = 0,
VCPU_REGS_RCX = 1,
VCPU_REGS_RDX = 2,
@@ -108,6 +113,7 @@ enum {
VCPU_REGS_R14 = 14,
VCPU_REGS_R15 = 15,
#endif
+ VCPU_REGS_RIP,
NR_VCPU_REGS
};
@@ -189,10 +195,20 @@ struct kvm_mmu_page {
*/
int multimapped; /* More than one parent_pte? */
int root_count; /* Currently serving as active root */
+ bool unsync;
+ bool unsync_children;
union {
u64 *parent_pte; /* !multimapped */
struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
};
+ DECLARE_BITMAP(unsync_child_bitmap, 512);
+};
+
+struct kvm_pv_mmu_op_buffer {
+ void *ptr;
+ unsigned len;
+ unsigned processed;
+ char buf[512] __aligned(sizeof(long));
};
/*
@@ -207,6 +223,9 @@ struct kvm_mmu {
gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
void (*prefetch_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *page);
+ int (*sync_page)(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_page *sp);
+ void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
hpa_t root_hpa;
int root_level;
int shadow_root_level;
@@ -219,8 +238,13 @@ struct kvm_vcpu_arch {
int interrupt_window_open;
unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
- unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
- unsigned long rip; /* needs vcpu_load_rsp_rip() */
+ /*
+ * rip and regs accesses must go through
+ * kvm_{register,rip}_{read,write} functions.
+ */
+ unsigned long regs[NR_VCPU_REGS];
+ u32 regs_avail;
+ u32 regs_dirty;
unsigned long cr0;
unsigned long cr2;
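
The regs_avail/regs_dirty pair added above implements lazy register caching: a set bit in regs_avail means regs[reg] already holds the value fetched from hardware state, and a set bit in regs_dirty means the cached value still has to be written back before guest entry. A minimal sketch of the kvm_register_read()/kvm_register_write() accessors the new comment refers to, assuming kvm_x86_ops->cache_reg() loads a single register from the VMCS/VMCB on demand (illustrative, not part of this patch):

static unsigned long kvm_register_read(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
        if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
                kvm_x86_ops->cache_reg(vcpu, reg);      /* fetch from hardware state */
        return vcpu->arch.regs[reg];
}

static void kvm_register_write(struct kvm_vcpu *vcpu, enum kvm_reg reg,
                               unsigned long val)
{
        vcpu->arch.regs[reg] = val;
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); /* flush before entry */
}
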
@@ -237,6 +261,9 @@ struct kvm_vcpu_arch {
bool tpr_access_reporting;
struct kvm_mmu mmu;
+ /* only needed in kvm_pv_mmu_op() path, but it's hot so
+ * put it here to avoid allocation */
+ struct kvm_pv_mmu_op_buffer mmu_op_buffer;
struct kvm_mmu_memory_cache mmu_pte_chain_cache;
struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
@@ -269,6 +296,11 @@ struct kvm_vcpu_arch {
u32 error_code;
} exception;
+ struct kvm_queued_interrupt {
+ bool pending;
+ u8 nr;
+ } interrupt;
+
struct {
int active;
u8 save_iopl;
@@ -294,6 +326,7 @@ struct kvm_vcpu_arch {
struct page *time_page;
bool nmi_pending;
+ bool nmi_injected;
u64 mtrr[0x100];
};
@@ -316,9 +349,12 @@ struct kvm_arch{
* Hash table of struct kvm_mmu_page.
*/
struct list_head active_mmu_pages;
+ struct list_head assigned_dev_head;
+ struct dmar_domain *intel_iommu_domain;
struct kvm_pic *vpic;
struct kvm_ioapic *vioapic;
struct kvm_pit *vpit;
+ struct hlist_head irq_ack_notifier_list;
int round_robin_prev_vcpu;
unsigned int tss_addr;
@@ -338,6 +374,7 @@ struct kvm_vm_stat {
u32 mmu_flooded;
u32 mmu_recycled;
u32 mmu_cache_miss;
+ u32 mmu_unsync;
u32 remote_tlb_flush;
u32 lpages;
};
@@ -364,6 +401,7 @@ struct kvm_vcpu_stat {
u32 insn_emulation;
u32 insn_emulation_fail;
u32 hypercalls;
+ u32 irq_injections;
};
struct descriptor_table {
@@ -414,8 +452,7 @@ struct kvm_x86_ops {
unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
int *exception);
- void (*cache_regs)(struct kvm_vcpu *vcpu);
- void (*decache_regs)(struct kvm_vcpu *vcpu);
+ void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
@@ -528,6 +565,8 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
u32 error_code);
+void kvm_pic_set_irq(void *opaque, int irq, int level);
+
void kvm_inject_nmi(struct kvm_vcpu *vcpu);
void fx_init(struct kvm_vcpu *vcpu);
@@ -550,12 +589,14 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
+void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
+void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_enable_tdp(void);
void kvm_disable_tdp(void);
@@ -686,33 +727,6 @@ enum {
TASK_SWITCH_GATE = 3,
};
-#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
- trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
- vcpu, 5, d1, d2, d3, d4, d5)
-#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
- trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
- vcpu, 4, d1, d2, d3, d4, 0)
-#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
- trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
- vcpu, 3, d1, d2, d3, 0, 0)
-#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
- trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
- vcpu, 2, d1, d2, 0, 0, 0)
-#define KVMTRACE_1D(evt, vcpu, d1, name) \
- trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
- vcpu, 1, d1, 0, 0, 0, 0)
-#define KVMTRACE_0D(evt, vcpu, name) \
- trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
- vcpu, 0, 0, 0, 0, 0, 0)
-
-#ifdef CONFIG_64BIT
-# define KVM_EX_ENTRY ".quad"
-# define KVM_EX_PUSH "pushq"
-#else
-# define KVM_EX_ENTRY ".long"
-# define KVM_EX_PUSH "pushl"
-#endif
-
/*
* Hardware virtualization extension instructions may fault if a
* reboot turns off virtualization while processes are running.
@@ -724,11 +738,11 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
"666: " insn "\n\t" \
".pushsection .fixup, \"ax\" \n" \
"667: \n\t" \
- KVM_EX_PUSH " $666b \n\t" \
+ __ASM_SIZE(push) " $666b \n\t" \
"jmp kvm_handle_fault_on_reboot \n\t" \
".popsection \n\t" \
".pushsection __ex_table, \"a\" \n\t" \
- KVM_EX_ENTRY " 666b, 667b \n\t" \
+ _ASM_PTR " 666b, 667b \n\t" \
".popsection"
#define KVM_ARCH_WANT_MMU_NOTIFIER
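
The fixup sequence above (wrapped in-tree by the __kvm_handle_fault_on_reboot() macro) lets a virtualization instruction fault harmlessly once an emergency reboot has turned VMX/SVM off: the faulting instruction's address is pushed and control jumps to kvm_handle_fault_on_reboot() via the exception-table entry. A hypothetical usage sketch (the wrapper name is illustrative):

static inline void cpu_vmxoff_safe(void)
{
        /* If virtualization was already torn down by a reboot, the fault on
         * VMXOFF is absorbed by the fixup path instead of oopsing. */
        asm volatile (__kvm_handle_fault_on_reboot("vmxoff") : : : "cc");
}
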
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h
index 0bb43301a20..dabd10f0bbe 100644
--- a/include/asm-x86/msr-index.h
+++ b/include/asm-x86/msr-index.h
@@ -178,6 +178,9 @@
#define MSR_IA32_EBL_CR_POWERON 0x0000002a
#define MSR_IA32_FEATURE_CONTROL 0x0000003a
+#define FEATURE_CONTROL_LOCKED (1<<0)
+#define FEATURE_CONTROL_VMXON_ENABLED (1<<2)
+
#define MSR_IA32_APICBASE 0x0000001b
#define MSR_IA32_APICBASE_BSP (1<<8)
#define MSR_IA32_APICBASE_ENABLE (1<<11)
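
The two new FEATURE_CONTROL bits describe the MSR_IA32_FEATURE_CONTROL register defined just above: bit 0 locks the MSR against further writes and bit 2 permits VMXON. A minimal sketch of the check a hypervisor would perform before enabling VMX (helper name is illustrative; rdmsrl() is the usual kernel accessor):

static int vmx_allowed_by_feature_control(void)
{
        u64 msr;

        rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
        /* Locked without the VMXON bit means the BIOS disabled VMX for good. */
        if (msr & FEATURE_CONTROL_LOCKED)
                return !!(msr & FEATURE_CONTROL_VMXON_ENABLED);
        return 1;       /* unlocked: the kernel may set and lock the bits itself */
}
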
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
index e8d80d1de23..bdf5dba4cfb 100644
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -39,7 +39,6 @@ typedef u64 pmdval_t;
typedef u64 pudval_t;
typedef u64 pgdval_t;
typedef u64 pgprotval_t;
-typedef u64 phys_addr_t;
typedef union {
struct {
@@ -60,7 +59,6 @@ typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
-typedef unsigned long phys_addr_t;
typedef union {
pteval_t pte;
diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h
index 5e64acfed0a..49380b8c7e2 100644
--- a/include/asm-x86/page_64.h
+++ b/include/asm-x86/page_64.h
@@ -79,7 +79,6 @@ typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
-typedef unsigned long phys_addr_t;
typedef struct page *pgtable_t;
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 182f9d4c570..88a53b1a17f 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -148,8 +148,13 @@
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
#else
+/*
+ * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
+ * bits are combined, this will allow the user to access the high address mapped
+ * VDSO in the presence of CONFIG_COMPAT_VDSO
+ */
#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
-#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
+#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
#endif
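
For reference, the attribute change above decomposes into the standard x86 page-table flag bits; the only bit added is USER (0x004):

0x063 = 0x001 (PRESENT) | 0x002 (RW) | 0x020 (ACCESSED) | 0x040 (DIRTY)
0x067 = 0x063 | 0x004 (USER)
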
diff --git a/include/asm-x86/pvclock.h b/include/asm-x86/pvclock.h
index 1a38f683480..ad29e277fd6 100644
--- a/include/asm-x86/pvclock.h
+++ b/include/asm-x86/pvclock.h
@@ -6,6 +6,7 @@
/* some helper functions for xen and kvm pv clock sources */
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
struct pvclock_vcpu_time_info *vcpu,
struct timespec *ts);
diff --git a/include/asm-x86/summit/apic.h b/include/asm-x86/summit/apic.h
index 0f68037b8f2..9b3070f1c2a 100644
--- a/include/asm-x86/summit/apic.h
+++ b/include/asm-x86/summit/apic.h
@@ -159,7 +159,7 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
int new_apicid = cpu_to_logical_apicid(cpu);
if (apicid_cluster(apicid) !=
apicid_cluster(new_apicid)){
- printk ("%s: Not a valid mask!\n",__FUNCTION__);
+ printk ("%s: Not a valid mask!\n", __func__);
return 0xFF;
}
apicid = apicid | new_apicid;
diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h
index c50185dccec..d5eada0a48d 100644
--- a/include/asm-x86/xen/page.h
+++ b/include/asm-x86/xen/page.h
@@ -76,13 +76,13 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
unsigned offset = phys.paddr & ~PAGE_MASK;
- return XMADDR(PFN_PHYS((u64)pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
+ return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}
static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
unsigned offset = machine.maddr & ~PAGE_MASK;
- return XPADDR(PFN_PHYS((u64)mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
+ return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}
/*