Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/bitops_32.h    |  7
-rw-r--r--  include/asm-x86/bitops_64.h    | 57
-rw-r--r--  include/asm-x86/gart.h         | 29
-rw-r--r--  include/asm-x86/iommu.h        |  4
-rw-r--r--  include/asm-x86/lguest_hcall.h | 16
-rw-r--r--  include/asm-x86/pci_64.h       |  2
-rw-r--r--  include/asm-x86/smp_32.h       |  9
7 files changed, 81 insertions(+), 43 deletions(-)
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
index 36ebb5b02b4..0b40f6d20be 100644
--- a/include/asm-x86/bitops_32.h
+++ b/include/asm-x86/bitops_32.h
@@ -183,9 +183,12 @@ static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
* @nr: Bit to set
* @addr: Address to count from
*
- * This is the same as test_and_set_bit on x86
+ * This is the same as test_and_set_bit on x86.
*/
-#define test_and_set_bit_lock test_and_set_bit
+static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
+{
+ return test_and_set_bit(nr, addr);
+}
/**
* __test_and_set_bit - Set a bit and return its old value
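The macro-to-inline change above gives test_and_set_bit_lock a real,
type-checked prototype. As a minimal sketch (not part of this patch; the
flag word and bit number are hypothetical), the lock-bit pattern these
helpers exist for looks like:

static unsigned long my_flags;		/* hypothetical flag word */
#define MY_LOCK_BIT 0			/* hypothetical bit number */

static void my_lock(void)
{
	/* Spin until we observe the bit clear and set it atomically. */
	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_flags))
		cpu_relax();
}

static void my_unlock(void)
{
	clear_bit_unlock(MY_LOCK_BIT, &my_flags);
}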
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
index b4d47940b95..766bcc0470a 100644
--- a/include/asm-x86/bitops_64.h
+++ b/include/asm-x86/bitops_64.h
@@ -29,7 +29,7 @@
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static __inline__ void set_bit(int nr, volatile void * addr)
+static inline void set_bit(int nr, volatile void *addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btsl %1,%0"
@@ -46,7 +46,7 @@ static __inline__ void set_bit(int nr, volatile void * addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static __inline__ void __set_bit(int nr, volatile void * addr)
+static inline void __set_bit(int nr, volatile void *addr)
{
__asm__ volatile(
"btsl %1,%0"
@@ -64,7 +64,7 @@ static __inline__ void __set_bit(int nr, volatile void * addr)
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
-static __inline__ void clear_bit(int nr, volatile void * addr)
+static inline void clear_bit(int nr, volatile void *addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btrl %1,%0"
@@ -86,7 +86,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
clear_bit(nr, addr);
}
-static __inline__ void __clear_bit(int nr, volatile void * addr)
+static inline void __clear_bit(int nr, volatile void *addr)
{
__asm__ __volatile__(
"btrl %1,%0"
@@ -124,7 +124,7 @@ static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static __inline__ void __change_bit(int nr, volatile void * addr)
+static inline void __change_bit(int nr, volatile void *addr)
{
__asm__ __volatile__(
"btcl %1,%0"
@@ -141,7 +141,7 @@ static __inline__ void __change_bit(int nr, volatile void * addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static __inline__ void change_bit(int nr, volatile void * addr)
+static inline void change_bit(int nr, volatile void *addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btcl %1,%0"
@@ -157,7 +157,7 @@ static __inline__ void change_bit(int nr, volatile void * addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+static inline int test_and_set_bit(int nr, volatile void *addr)
{
int oldbit;
@@ -173,9 +173,12 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
* @nr: Bit to set
* @addr: Address to count from
*
- * This is the same as test_and_set_bit on x86
+ * This is the same as test_and_set_bit on x86.
*/
-#define test_and_set_bit_lock test_and_set_bit
+static inline int test_and_set_bit_lock(int nr, volatile void *addr)
+{
+ return test_and_set_bit(nr, addr);
+}
/**
* __test_and_set_bit - Set a bit and return its old value
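As on 32-bit, the old #define is replaced by a wrapper with its own
checked prototype. One practical consequence, sketched below with
hypothetical names: the helper can now be taken by address and passed
around like any other function.

static int (*my_bit_op)(int, volatile void *) = test_and_set_bit_lock;

static int my_try_claim(volatile void *flags)
{
	/* Nonzero if we flipped bit 0 from clear to set, i.e. claimed it. */
	return !my_bit_op(0, flags);
}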
@@ -186,7 +189,7 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
*/
-static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+static inline int __test_and_set_bit(int nr, volatile void *addr)
{
int oldbit;
@@ -205,7 +208,7 @@ static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+static inline int test_and_clear_bit(int nr, volatile void *addr)
{
int oldbit;
@@ -225,7 +228,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
*/
-static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+static inline int __test_and_clear_bit(int nr, volatile void *addr)
{
int oldbit;
@@ -237,7 +240,7 @@ static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
}
/* WARNING: non-atomic and it can be reordered! */
-static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
+static inline int __test_and_change_bit(int nr, volatile void *addr)
{
int oldbit;
@@ -256,7 +259,7 @@ static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+static inline int test_and_change_bit(int nr, volatile void *addr)
{
int oldbit;
@@ -273,15 +276,15 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
* @nr: bit number to test
* @addr: Address to start counting from
*/
-static int test_bit(int nr, const volatile void * addr);
+static int test_bit(int nr, const volatile void *addr);
#endif
-static __inline__ int constant_test_bit(int nr, const volatile void * addr)
+static inline int constant_test_bit(int nr, const volatile void *addr)
{
return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}
-static __inline__ int variable_test_bit(int nr, volatile const void * addr)
+static inline int variable_test_bit(int nr, volatile const void *addr)
{
int oldbit;
@@ -299,10 +302,10 @@ static __inline__ int variable_test_bit(int nr, volatile const void * addr)
#undef ADDR
-extern long find_first_zero_bit(const unsigned long * addr, unsigned long size);
-extern long find_next_zero_bit (const unsigned long * addr, long size, long offset);
-extern long find_first_bit(const unsigned long * addr, unsigned long size);
-extern long find_next_bit(const unsigned long * addr, long size, long offset);
+extern long find_first_zero_bit(const unsigned long *addr, unsigned long size);
+extern long find_next_zero_bit(const unsigned long *addr, long size, long offset);
+extern long find_first_bit(const unsigned long *addr, unsigned long size);
+extern long find_next_bit(const unsigned long *addr, long size, long offset);
/* return index of first bit set in val or max when no bit is set */
static inline long __scanbit(unsigned long val, unsigned long max)
@@ -363,7 +366,7 @@ static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i,
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
-static __inline__ unsigned long ffz(unsigned long word)
+static inline unsigned long ffz(unsigned long word)
{
__asm__("bsfq %1,%0"
:"=r" (word)
@@ -377,7 +380,7 @@ static __inline__ unsigned long ffz(unsigned long word)
*
* Undefined if no bit exists, so code should check against 0 first.
*/
-static __inline__ unsigned long __ffs(unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
{
__asm__("bsfq %1,%0"
:"=r" (word)
@@ -391,7 +394,7 @@ static __inline__ unsigned long __ffs(unsigned long word)
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
-static __inline__ unsigned long __fls(unsigned long word)
+static inline unsigned long __fls(unsigned long word)
{
__asm__("bsrq %1,%0"
:"=r" (word)
@@ -411,7 +414,7 @@ static __inline__ unsigned long __fls(unsigned long word)
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
-static __inline__ int ffs(int x)
+static inline int ffs(int x)
{
int r;
@@ -427,7 +430,7 @@ static __inline__ int ffs(int x)
*
* This is defined the same way as fls.
*/
-static __inline__ int fls64(__u64 x)
+static inline int fls64(__u64 x)
{
if (x == 0)
return 0;
@@ -440,7 +443,7 @@ static __inline__ int fls64(__u64 x)
*
* This is defined the same way as ffs.
*/
-static __inline__ int fls(int x)
+static inline int fls(int x)
{
int r;
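The scan helpers above map directly onto bsfq/bsrq. A worked sketch
(illustrative values, hypothetical wrapper name): __ffs(0x18) == 3,
since bit 3 is the lowest set bit, and ffz(0x7) == 3, since bit 3 is
the lowest clear bit. Both are undefined when no qualifying bit exists,
hence the guard:

static unsigned long my_first_free(unsigned long bitmap)
{
	if (bitmap == ~0UL)
		return ~0UL;	/* no zero bit: ffz() would be undefined */
	return ffz(bitmap);	/* index of the lowest clear bit */
}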
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h
new file mode 100644
index 00000000000..f704c50519b
--- /dev/null
+++ b/include/asm-x86/gart.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_X8664_IOMMU_H
+#define _ASM_X8664_IOMMU_H 1
+
+extern void pci_iommu_shutdown(void);
+extern void no_iommu_init(void);
+extern int force_iommu, no_iommu;
+extern int iommu_detected;
+#ifdef CONFIG_GART_IOMMU
+extern void gart_iommu_init(void);
+extern void gart_iommu_shutdown(void);
+extern void __init gart_parse_options(char *);
+extern void gart_iommu_hole_init(void);
+extern int fallback_aper_order;
+extern int fallback_aper_force;
+extern int gart_iommu_aperture;
+extern int gart_iommu_aperture_allowed;
+extern int gart_iommu_aperture_disabled;
+extern int fix_aperture;
+#else
+#define gart_iommu_aperture 0
+#define gart_iommu_aperture_allowed 0
+
+static inline void gart_iommu_shutdown(void)
+{
+}
+
+#endif
+
+#endif
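The #else stubs let callers use the GART interface without #ifdefs: with
CONFIG_GART_IOMMU disabled, gart_iommu_aperture is the constant 0 and
gart_iommu_shutdown() is an empty inline, so code like the following
hypothetical teardown path compiles either way:

#include <asm/gart.h>

static void my_pci_teardown(void)
{
	if (gart_iommu_aperture)	/* constant-folds to 0 if disabled */
		gart_iommu_shutdown();
}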
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index 5af471f228e..07862fdd23c 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X8664_IOMMU_H
-#define _ASM_X8664_IOMMU_H 1
+#ifndef _ASM_X8664_GART_H
+#define _ASM_X8664_GART_H 1
extern void pci_iommu_shutdown(void);
extern void no_iommu_init(void);
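Note that the include guards end up crossed by this commit: the new
gart.h carries _ASM_X8664_IOMMU_H while iommu.h is renamed to
_ASM_X8664_GART_H. The names are swapped but still distinct, so double
inclusion keeps working.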
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h
index f948491eb56..9c5092b6aa9 100644
--- a/include/asm-x86/lguest_hcall.h
+++ b/include/asm-x86/lguest_hcall.h
@@ -18,12 +18,17 @@
#define LHCALL_LOAD_TLS 16
#define LHCALL_NOTIFY 17
+#define LGUEST_TRAP_ENTRY 0x1F
+
+#ifndef __ASSEMBLY__
+#include <asm/hw_irq.h>
+
/*G:031 First, how does our Guest contact the Host to ask for privileged
* operations? There are two ways: the direct way is to make a "hypercall",
* to make requests of the Host Itself.
*
* Our hypercall mechanism uses the highest unused trap code (traps 32 and
- * above are used by real hardware interrupts). Seventeen hypercalls are
+ * above are used by real hardware interrupts). Fifteen hypercalls are
* available: the hypercall number is put in the %eax register, and the
* arguments (when required) are placed in %edx, %ebx and %ecx. If a return
* value makes sense, it's returned in %eax.
@@ -31,20 +36,15 @@
* Grossly invalid calls result in Sudden Death at the hands of the vengeful
* Host, rather than returning failure. This reflects Winston Churchill's
* definition of a gentleman: "someone who is only rude intentionally". */
-#define LGUEST_TRAP_ENTRY 0x1F
-
-#ifndef __ASSEMBLY__
-#include <asm/hw_irq.h>
-
static inline unsigned long
hcall(unsigned long call,
unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
/* "int" is the Intel instruction to trigger a trap. */
asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
- /* The call is in %eax (aka "a"), and can be replaced */
+ /* The call in %eax (aka "a") might be overwritten */
: "=a"(call)
- /* The other arguments are in %eax, %edx, %ebx & %ecx */
+ /* The arguments are in %eax, %edx, %ebx & %ecx */
: "a"(call), "d"(arg1), "b"(arg2), "c"(arg3)
/* "memory" means this might write somewhere in memory.
* This isn't true for all calls, but it's safe to tell
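Putting the pieces together, a Guest-side call looks like the following
sketch (hypothetical wrapper; LHCALL_NOTIFY is defined above): the call
number goes in %eax, the arguments in %edx, %ebx and %ecx, and the trap
is "int $0x1F".

static void my_notify_host(unsigned long addr)
{
	/* Return value in %eax is ignored here. */
	hcall(LHCALL_NOTIFY, addr, 0, 0);
}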
diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h
index 9baa46d9f59..ef54226a932 100644
--- a/include/asm-x86/pci_64.h
+++ b/include/asm-x86/pci_64.h
@@ -37,7 +37,7 @@ extern int iommu_setup(char *opt);
*/
#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
-#if defined(CONFIG_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
+#if defined(CONFIG_GART_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
dma_addr_t ADDR_NAME;
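The macro is consumed by driver structs; a sketch with a hypothetical
receive-buffer struct (with neither IOMMU configured, the field
compiles away entirely):

struct my_rx_buf {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(mapping)	/* dma_addr_t mapping; or nothing */
};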
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h
index 7056d868452..e10b7affdfe 100644
--- a/include/asm-x86/smp_32.h
+++ b/include/asm-x86/smp_32.h
@@ -94,9 +94,12 @@ static inline void smp_send_reschedule(int cpu)
{
smp_ops.smp_send_reschedule(cpu);
}
-extern int smp_call_function_mask(cpumask_t mask,
- void (*func) (void *info), void *info,
- int wait);
+static inline int smp_call_function_mask(cpumask_t mask,
+ void (*func) (void *info), void *info,
+ int wait)
+{
+ return smp_ops.smp_call_function_mask(mask, func, info, wait);
+}
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
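With the extern replaced by an inline dispatch through smp_ops, callers
are unchanged. A sketch with a hypothetical callback (wait=1 blocks
until every selected CPU has run it):

static void my_func(void *info)
{
	/* Runs on each CPU in the mask. */
}

static int my_kick_cpus(cpumask_t mask)
{
	return smp_call_function_mask(mask, my_func, NULL, 1);
}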