Diffstat (limited to 'include/asm-x86/xen/hypercall.h')
-rw-r--r-- include/asm-x86/xen/hypercall.h | 274
1 file changed, 191 insertions(+), 83 deletions(-)
diff --git a/include/asm-x86/xen/hypercall.h b/include/asm-x86/xen/hypercall.h
index c2ccd997ed3..91cb7fd5c12 100644
--- a/include/asm-x86/xen/hypercall.h
+++ b/include/asm-x86/xen/hypercall.h
@@ -40,83 +40,157 @@
#include <xen/interface/sched.h>
#include <xen/interface/physdev.h>
+/*
+ * The hypercall asms have to meet several constraints:
+ * - Work on 32- and 64-bit.
+ * The two architectures put their arguments in different sets of
+ * registers.
+ *
+ * - Work around asm syntax quirks
+ * It isn't possible to specify one of the rNN registers in a
+ * constraint, so we use explicit register variables to get the
+ * args into the right place.
+ *
+ * - Mark all registers as potentially clobbered
+ * Even unused parameters can be clobbered by the hypervisor, so we
+ * need to make sure gcc knows it.
+ *
+ * - Avoid compiler bugs.
+ * This is the tricky part. Because x86_32 has such a constrained
+ * register set, gcc versions below 4.3 have trouble generating
+ * code when all the arg registers and memory are trashed by the
+ * asm. There are syntactically simpler ways of achieving the
+ * semantics below, but they cause the compiler to crash.
+ *
+ * The only combination I found which works is:
+ * - assign the __argX variables first
+ * - list all actually used parameters as "+r" (__argX)
+ * - clobber the rest
+ *
+ * The result certainly isn't pretty, and it really shows up cpp's
+ * weakness as a macro language. Sorry. (But let's just give thanks
+ * there aren't more than 5 arguments...)
+ */
+
extern struct { char _entry[32]; } hypercall_page[];
+#define __HYPERCALL "call hypercall_page+%c[offset]"
+#define __HYPERCALL_ENTRY(x) \
+ [offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
+
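/*
 * Illustrative sketch (assumptions: __HYPERVISOR_fpu_taskswitch is
 * hypercall number 5; entries are 32 bytes, as declared above): each
 * hypercall gets its own 32-byte slot in hypercall_page, so the
 * "call hypercall_page+%c[offset]" template resolves to a direct call
 * at a constant offset into the page, roughly:
 *
 *	call hypercall_page+160
 */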
+#ifdef CONFIG_X86_32
+#define __HYPERCALL_RETREG "eax"
+#define __HYPERCALL_ARG1REG "ebx"
+#define __HYPERCALL_ARG2REG "ecx"
+#define __HYPERCALL_ARG3REG "edx"
+#define __HYPERCALL_ARG4REG "esi"
+#define __HYPERCALL_ARG5REG "edi"
+#else
+#define __HYPERCALL_RETREG "rax"
+#define __HYPERCALL_ARG1REG "rdi"
+#define __HYPERCALL_ARG2REG "rsi"
+#define __HYPERCALL_ARG3REG "rdx"
+#define __HYPERCALL_ARG4REG "r10"
+#define __HYPERCALL_ARG5REG "r8"
+#endif
+
+#define __HYPERCALL_DECLS \
+ register unsigned long __res asm(__HYPERCALL_RETREG); \
+ register unsigned long __arg1 asm(__HYPERCALL_ARG1REG) = __arg1; \
+ register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
+ register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
+ register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
+ register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;
+
+#define __HYPERCALL_0PARAM "=r" (__res)
+#define __HYPERCALL_1PARAM __HYPERCALL_0PARAM, "+r" (__arg1)
+#define __HYPERCALL_2PARAM __HYPERCALL_1PARAM, "+r" (__arg2)
+#define __HYPERCALL_3PARAM __HYPERCALL_2PARAM, "+r" (__arg3)
+#define __HYPERCALL_4PARAM __HYPERCALL_3PARAM, "+r" (__arg4)
+#define __HYPERCALL_5PARAM __HYPERCALL_4PARAM, "+r" (__arg5)
+
+#define __HYPERCALL_0ARG()
+#define __HYPERCALL_1ARG(a1) \
+ __HYPERCALL_0ARG() __arg1 = (unsigned long)(a1);
+#define __HYPERCALL_2ARG(a1,a2) \
+ __HYPERCALL_1ARG(a1) __arg2 = (unsigned long)(a2);
+#define __HYPERCALL_3ARG(a1,a2,a3) \
+ __HYPERCALL_2ARG(a1,a2) __arg3 = (unsigned long)(a3);
+#define __HYPERCALL_4ARG(a1,a2,a3,a4) \
+ __HYPERCALL_3ARG(a1,a2,a3) __arg4 = (unsigned long)(a4);
+#define __HYPERCALL_5ARG(a1,a2,a3,a4,a5) \
+ __HYPERCALL_4ARG(a1,a2,a3,a4) __arg5 = (unsigned long)(a5);
+
+#define __HYPERCALL_CLOBBER5 "memory"
+#define __HYPERCALL_CLOBBER4 __HYPERCALL_CLOBBER5, __HYPERCALL_ARG5REG
+#define __HYPERCALL_CLOBBER3 __HYPERCALL_CLOBBER4, __HYPERCALL_ARG4REG
+#define __HYPERCALL_CLOBBER2 __HYPERCALL_CLOBBER3, __HYPERCALL_ARG3REG
+#define __HYPERCALL_CLOBBER1 __HYPERCALL_CLOBBER2, __HYPERCALL_ARG2REG
+#define __HYPERCALL_CLOBBER0 __HYPERCALL_CLOBBER1, __HYPERCALL_ARG1REG
+
#define _hypercall0(type, name) \
({ \
- long __res; \
- asm volatile ( \
- "call %[call]" \
- : "=a" (__res) \
- : [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
- : "memory" ); \
+ __HYPERCALL_DECLS; \
+ __HYPERCALL_0ARG(); \
+ asm volatile (__HYPERCALL \
+ : __HYPERCALL_0PARAM \
+ : __HYPERCALL_ENTRY(name) \
+ : __HYPERCALL_CLOBBER0); \
(type)__res; \
})
#define _hypercall1(type, name, a1) \
({ \
- long __res, __ign1; \
- asm volatile ( \
- "call %[call]" \
- : "=a" (__res), "=b" (__ign1) \
- : "1" ((long)(a1)), \
- [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
- : "memory" ); \
+ __HYPERCALL_DECLS; \
+ __HYPERCALL_1ARG(a1); \
+ asm volatile (__HYPERCALL \
+ : __HYPERCALL_1PARAM \
+ : __HYPERCALL_ENTRY(name) \
+ : __HYPERCALL_CLOBBER1); \
(type)__res; \
})
#define _hypercall2(type, name, a1, a2) \
({ \
- long __res, __ign1, __ign2; \
- asm volatile ( \
- "call %[call]" \
- : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \
- : "1" ((long)(a1)), "2" ((long)(a2)), \
- [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
- : "memory" ); \
+ __HYPERCALL_DECLS; \
+ __HYPERCALL_2ARG(a1, a2); \
+ asm volatile (__HYPERCALL \
+ : __HYPERCALL_2PARAM \
+ : __HYPERCALL_ENTRY(name) \
+ : __HYPERCALL_CLOBBER2); \
(type)__res; \
})
#define _hypercall3(type, name, a1, a2, a3) \
({ \
- long __res, __ign1, __ign2, __ign3; \
- asm volatile ( \
- "call %[call]" \
- : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
- "=d" (__ign3) \
- : "1" ((long)(a1)), "2" ((long)(a2)), \
- "3" ((long)(a3)), \
- [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
- : "memory" ); \
+ __HYPERCALL_DECLS; \
+ __HYPERCALL_3ARG(a1, a2, a3); \
+ asm volatile (__HYPERCALL \
+ : __HYPERCALL_3PARAM \
+ : __HYPERCALL_ENTRY(name) \
+ : __HYPERCALL_CLOBBER3); \
(type)__res; \
})
#define _hypercall4(type, name, a1, a2, a3, a4) \
({ \
- long __res, __ign1, __ign2, __ign3, __ign4; \
- asm volatile ( \
- "call %[call]" \
- : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
- "=d" (__ign3), "=S" (__ign4) \
- : "1" ((long)(a1)), "2" ((long)(a2)), \
- "3" ((long)(a3)), "4" ((long)(a4)), \
- [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
- : "memory" ); \
+ __HYPERCALL_DECLS; \
+ __HYPERCALL_4ARG(a1, a2, a3, a4); \
+ asm volatile (__HYPERCALL \
+ : __HYPERCALL_4PARAM \
+ : __HYPERCALL_ENTRY(name) \
+ : __HYPERCALL_CLOBBER4); \
(type)__res; \
})
#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
({ \
- long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \
- asm volatile ( \
- "call %[call]" \
- : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
- "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \
- : "1" ((long)(a1)), "2" ((long)(a2)), \
- "3" ((long)(a3)), "4" ((long)(a4)), \
- "5" ((long)(a5)), \
- [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
- : "memory" ); \
+ __HYPERCALL_DECLS; \
+ __HYPERCALL_5ARG(a1, a2, a3, a4, a5); \
+ asm volatile (__HYPERCALL \
+ : __HYPERCALL_5PARAM \
+ : __HYPERCALL_ENTRY(name) \
+ : __HYPERCALL_CLOBBER5); \
(type)__res; \
})
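/*
 * Illustrative sketch of what a two-argument hypercall roughly expands
 * to on x86_64, using the macros above (the real expansion also declares
 * the unused __arg3..__arg5 register variables); for
 * _hypercall2(int, sched_op_new, cmd, arg):
 *
 *	register unsigned long __res  asm("rax");
 *	register unsigned long __arg1 asm("rdi") = __arg1;
 *	register unsigned long __arg2 asm("rsi") = __arg2;
 *	__arg1 = (unsigned long)(cmd);
 *	__arg2 = (unsigned long)(arg);
 *	asm volatile("call hypercall_page+%c[offset]"
 *		     : "=r" (__res), "+r" (__arg1), "+r" (__arg2)
 *		     : [offset] "i" (__HYPERVISOR_sched_op_new
 *				     * sizeof(hypercall_page[0]))
 *		     : "memory", "r8", "r10", "rdx");
 *	(int)__res;
 */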
@@ -152,6 +226,7 @@ HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
return _hypercall2(int, stack_switch, ss, esp);
}
+#ifdef CONFIG_X86_32
static inline int
HYPERVISOR_set_callbacks(unsigned long event_selector,
unsigned long event_address,
@@ -162,6 +237,17 @@ HYPERVISOR_set_callbacks(unsigned long event_selector,
event_selector, event_address,
failsafe_selector, failsafe_address);
}
+#else /* CONFIG_X86_64 */
+static inline int
+HYPERVISOR_set_callbacks(unsigned long event_address,
+ unsigned long failsafe_address,
+ unsigned long syscall_address)
+{
+ return _hypercall3(int, set_callbacks,
+ event_address, failsafe_address,
+ syscall_address);
+}
+#endif /* CONFIG_X86_{32,64} */
static inline int
HYPERVISOR_callback_op(int cmd, void *arg)
@@ -176,9 +262,9 @@ HYPERVISOR_fpu_taskswitch(int set)
}
static inline int
-HYPERVISOR_sched_op(int cmd, unsigned long arg)
+HYPERVISOR_sched_op(int cmd, void *arg)
{
- return _hypercall2(int, sched_op, cmd, arg);
+ return _hypercall2(int, sched_op_new, cmd, arg);
}
static inline long
@@ -223,12 +309,12 @@ static inline int
HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
unsigned long flags)
{
- unsigned long pte_hi = 0;
-#ifdef CONFIG_X86_PAE
- pte_hi = new_val.pte_high;
-#endif
- return _hypercall4(int, update_va_mapping, va,
- new_val.pte_low, pte_hi, flags);
+ if (sizeof(new_val) == sizeof(long))
+ return _hypercall3(int, update_va_mapping, va,
+ new_val.pte, flags);
+ else
+ return _hypercall4(int, update_va_mapping, va,
+ new_val.pte, new_val.pte >> 32, flags);
}
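/*
 * A sketch of how the sizeof() test above resolves: it is a compile-time
 * constant, so only one branch survives.  On x86_64 (and 32-bit non-PAE),
 * where sizeof(pte_t) == sizeof(long), the body collapses to:
 *
 *	return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
 *
 * while on 32-bit PAE the 64-bit pte is split into low and high halves
 * and passed as two separate arguments.
 */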
static inline int
@@ -281,12 +367,13 @@ static inline int
HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
unsigned long flags, domid_t domid)
{
- unsigned long pte_hi = 0;
-#ifdef CONFIG_X86_PAE
- pte_hi = new_val.pte_high;
-#endif
- return _hypercall5(int, update_va_mapping_otherdomain, va,
- new_val.pte_low, pte_hi, flags, domid);
+ if (sizeof(new_val) == sizeof(long))
+ return _hypercall4(int, update_va_mapping_otherdomain, va,
+ new_val.pte, flags, domid);
+ else
+ return _hypercall5(int, update_va_mapping_otherdomain, va,
+ new_val.pte, new_val.pte >> 32,
+ flags, domid);
}
static inline int
@@ -301,6 +388,14 @@ HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
}
+#ifdef CONFIG_X86_64
+static inline int
+HYPERVISOR_set_segment_base(int reg, unsigned long value)
+{
+ return _hypercall2(int, set_segment_base, reg, value);
+}
+#endif
+
static inline int
HYPERVISOR_suspend(unsigned long srec)
{
@@ -315,19 +410,26 @@ HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
}
static inline void
+MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
+{
+ mcl->op = __HYPERVISOR_fpu_taskswitch;
+ mcl->args[0] = set;
+}
+
+static inline void
MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
pte_t new_val, unsigned long flags)
{
mcl->op = __HYPERVISOR_update_va_mapping;
mcl->args[0] = va;
-#ifdef CONFIG_X86_PAE
- mcl->args[1] = new_val.pte_low;
- mcl->args[2] = new_val.pte_high;
-#else
- mcl->args[1] = new_val.pte_low;
- mcl->args[2] = 0;
-#endif
- mcl->args[3] = flags;
+ if (sizeof(new_val) == sizeof(long)) {
+ mcl->args[1] = new_val.pte;
+ mcl->args[2] = flags;
+ } else {
+ mcl->args[1] = new_val.pte;
+ mcl->args[2] = new_val.pte >> 32;
+ mcl->args[3] = flags;
+ }
}
static inline void
@@ -347,15 +449,16 @@ MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long v
{
mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
mcl->args[0] = va;
-#ifdef CONFIG_X86_PAE
- mcl->args[1] = new_val.pte_low;
- mcl->args[2] = new_val.pte_high;
-#else
- mcl->args[1] = new_val.pte_low;
- mcl->args[2] = 0;
-#endif
- mcl->args[3] = flags;
- mcl->args[4] = domid;
+ if (sizeof(new_val) == sizeof(long)) {
+ mcl->args[1] = new_val.pte;
+ mcl->args[2] = flags;
+ mcl->args[3] = domid;
+ } else {
+ mcl->args[1] = new_val.pte;
+ mcl->args[2] = new_val.pte >> 32;
+ mcl->args[3] = flags;
+ mcl->args[4] = domid;
+ }
}
static inline void
@@ -363,10 +466,15 @@ MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
struct desc_struct desc)
{
mcl->op = __HYPERVISOR_update_descriptor;
- mcl->args[0] = maddr;
- mcl->args[1] = maddr >> 32;
- mcl->args[2] = desc.a;
- mcl->args[3] = desc.b;
+ if (sizeof(maddr) == sizeof(long)) {
+ mcl->args[0] = maddr;
+ mcl->args[1] = *(unsigned long *)&desc;
+ } else {
+ mcl->args[0] = maddr;
+ mcl->args[1] = maddr >> 32;
+ mcl->args[2] = desc.a;
+ mcl->args[3] = desc.b;
+ }
}
static inline void