author    | Ingo Molnar <mingo@elte.hu> | 2008-11-20 10:48:28 +0100
committer | Ingo Molnar <mingo@elte.hu> | 2008-11-20 10:48:31 +0100
commit    | c032a2de4c1a82187e9a754511043be47c8a92b5 (patch)
tree      | 6d20bfcff683555b641a376ffdffb2dbc1f1599a /arch/x86/include/asm
parent    | 722024dbb74f3ea316c285c0a71a4512e113b0c4 (diff)
parent    | cbe9ee00cea58d1f77b172fe22a51080e90877f2 (diff)
Merge branch 'x86/cleanups' into x86/irq
[ merged x86/cleanups into x86/irq to enable a wider IRQ entry code
patch to be applied, which depends on a cleanup patch in x86/cleanups. ]
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r-- | arch/x86/include/asm/acpi.h        |  1
-rw-r--r-- | arch/x86/include/asm/bitops.h      | 10
-rw-r--r-- | arch/x86/include/asm/byteorder.h   | 74
-rw-r--r-- | arch/x86/include/asm/iomap.h       | 30
-rw-r--r-- | arch/x86/include/asm/iommu.h       |  1
-rw-r--r-- | arch/x86/include/asm/irq.h         |  4
-rw-r--r-- | arch/x86/include/asm/irq_regs_32.h |  2
-rw-r--r-- | arch/x86/include/asm/tsc.h         |  8
8 files changed, 71 insertions, 59 deletions
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 8d676d8ecde..9830681446a 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -113,7 +113,6 @@ static inline void acpi_disable_pci(void)
 	acpi_pci_disabled = 1;
 	acpi_noirq_set();
 }
-extern int acpi_irq_balance_set(char *str);
 
 /* routines for saving/restoring kernel state */
 extern int acpi_save_state_mem(void);
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 36001032271..9fa9dcdf344 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -168,7 +168,15 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
  */
 static inline void change_bit(int nr, volatile unsigned long *addr)
 {
-	asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
+	if (IS_IMMEDIATE(nr)) {
+		asm volatile(LOCK_PREFIX "xorb %1,%0"
+			: CONST_MASK_ADDR(nr, addr)
+			: "iq" ((u8)CONST_MASK(nr)));
+	} else {
+		asm volatile(LOCK_PREFIX "btc %1,%0"
+			: BITOP_ADDR(addr)
+			: "Ir" (nr));
+	}
 }
 
 /**
diff --git a/arch/x86/include/asm/byteorder.h b/arch/x86/include/asm/byteorder.h
index e02ae2d89ac..f110ad417df 100644
--- a/arch/x86/include/asm/byteorder.h
+++ b/arch/x86/include/asm/byteorder.h
@@ -4,26 +4,33 @@
 #include <asm/types.h>
 #include <linux/compiler.h>
 
-#ifdef __GNUC__
+#define __LITTLE_ENDIAN
 
-#ifdef __i386__
-
-static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
+static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
 {
-#ifdef CONFIG_X86_BSWAP
-	asm("bswap %0" : "=r" (x) : "0" (x));
-#else
+#ifdef __i386__
+# ifdef CONFIG_X86_BSWAP
+	asm("bswap %0" : "=r" (val) : "0" (val));
+# else
 	asm("xchgb %b0,%h0\n\t"	/* swap lower bytes	*/
 	    "rorl $16,%0\n\t"	/* swap words		*/
 	    "xchgb %b0,%h0"	/* swap higher bytes	*/
-	    : "=q" (x)
-	    : "0" (x));
+	    : "=q" (val)
+	    : "0" (val));
+# endif
+
+#else /* __i386__ */
+	asm("bswapl %0"
+	    : "=r" (val)
+	    : "0" (val));
 #endif
-	return x;
+	return val;
 }
+#define __arch_swab32 __arch_swab32
 
-static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
+static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
 {
+#ifdef __i386__
 	union {
 		struct {
 			__u32 a;
@@ -32,50 +39,27 @@ static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
 		__u64 u;
 	} v;
 	v.u = val;
-#ifdef CONFIG_X86_BSWAP
+# ifdef CONFIG_X86_BSWAP
 	asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
 	    : "=r" (v.s.a), "=r" (v.s.b)
 	    : "0" (v.s.a), "1" (v.s.b));
-#else
-	v.s.a = ___arch__swab32(v.s.a);
-	v.s.b = ___arch__swab32(v.s.b);
+# else
+	v.s.a = __arch_swab32(v.s.a);
+	v.s.b = __arch_swab32(v.s.b);
 	asm("xchgl %0,%1"
 	    : "=r" (v.s.a), "=r" (v.s.b)
 	    : "0" (v.s.a), "1" (v.s.b));
-#endif
+# endif
 	return v.u;
-}
-
 #else /* __i386__ */
-
-static inline __attribute_const__ __u64 ___arch__swab64(__u64 x)
-{
 	asm("bswapq %0"
-	    : "=r" (x)
-	    : "0" (x));
-	return x;
-}
-
-static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
-{
-	asm("bswapl %0"
-	    : "=r" (x)
-	    : "0" (x));
-	return x;
-}
-
+	    : "=r" (val)
+	    : "0" (val));
+	return val;
 #endif
+}
+#define __arch_swab64 __arch_swab64
 
-/* Do not define swab16.  Gcc is smart enough to recognize "C" version and
-   convert it into rotation or exhange.  */
-
-#define __arch__swab64(x) ___arch__swab64(x)
-#define __arch__swab32(x) ___arch__swab32(x)
-
-#define __BYTEORDER_HAS_U64__
-
-#endif /* __GNUC__ */
-
-#include <linux/byteorder/little_endian.h>
+#include <linux/byteorder.h>
 
 #endif /* _ASM_X86_BYTEORDER_H */
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
new file mode 100644
index 00000000000..c1f06289b14
--- /dev/null
+++ b/arch/x86/include/asm/iomap.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright © 2008 Ingo Molnar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+void *
+iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+
+void
+iounmap_atomic(void *kvaddr, enum km_type type);
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index e4a552d4446..0b500c5b644 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -6,7 +6,6 @@ extern void no_iommu_init(void);
 extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
-extern int dmar_disabled;
 
 extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
 
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index bae0eda9548..28e409fc73f 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -31,10 +31,6 @@ static inline int irq_canonicalize(int irq)
 # endif
 #endif
 
-#ifdef CONFIG_IRQBALANCE
-extern int irqbalance_disable(char *str);
-#endif
-
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
 extern void fixup_irqs(cpumask_t map);
diff --git a/arch/x86/include/asm/irq_regs_32.h b/arch/x86/include/asm/irq_regs_32.h
index af2f02d27fc..86afd747345 100644
--- a/arch/x86/include/asm/irq_regs_32.h
+++ b/arch/x86/include/asm/irq_regs_32.h
@@ -9,6 +9,8 @@
 
 #include <asm/percpu.h>
 
+#define ARCH_HAS_OWN_IRQ_REGS
+
 DECLARE_PER_CPU(struct pt_regs *, irq_regs);
 
 static inline struct pt_regs *get_irq_regs(void)
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 9cd83a8e40d..38ae163cc91 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -34,8 +34,6 @@ static inline cycles_t get_cycles(void)
 
 static __always_inline cycles_t vget_cycles(void)
 {
-	cycles_t cycles;
-
 	/*
 	 * We only do VDSOs on TSC capable CPUs, so this shouldnt
 	 * access boot_cpu_data (which is not VDSO-safe):
@@ -44,11 +42,7 @@ static __always_inline cycles_t vget_cycles(void)
 	if (!cpu_has_tsc)
 		return 0;
 #endif
-	rdtsc_barrier();
-	cycles = (cycles_t)__native_read_tsc();
-	rdtsc_barrier();
-
-	return cycles;
+	return (cycles_t)__native_read_tsc();
 }
 
 extern void tsc_init(void);
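For reference, the change_bit() hunk above replaces the unconditional "lock btc" with a byte-wide "lock xorb" whenever the bit number is a compile-time constant (IS_IMMEDIATE() boils down to __builtin_constant_p() in this tree). The sketch below is a minimal userspace illustration of that idea, not kernel code: change_bit_demo(), DEMO_CONST_MASK() and DEMO_CONST_BYTE() are made-up names, and the byte-wise path relies on x86 being little-endian, where byte k of a word holds bits 8*k..8*k+7.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's CONST_MASK()/CONST_MASK_ADDR() idea */
#define DEMO_CONST_MASK(nr)  (1U << ((nr) & 7))   /* bit's mask within its byte */
#define DEMO_CONST_BYTE(nr)  ((nr) >> 3)          /* which byte holds the bit   */

static inline void change_bit_demo(int nr, unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		/*
		 * Constant bit number: toggle a single byte with an XOR of a
		 * constant mask (the kernel emits "lock xorb" here).
		 */
		((unsigned char *)addr)[DEMO_CONST_BYTE(nr)] ^= DEMO_CONST_MASK(nr);
	} else {
		/* Variable bit number: generic word-wide toggle ("lock btc"). */
		addr[nr / (8 * sizeof(unsigned long))] ^=
			1UL << (nr % (8 * sizeof(unsigned long)));
	}
}

int main(void)
{
	unsigned long word = 0;

	change_bit_demo(3, &word);              /* nr is a compile-time constant */
	printf("after toggling bit 3:  %#lx\n", word);
	change_bit_demo(3, &word);              /* toggle it back */
	printf("after toggling again: %#lx\n", word);
	return 0;
}

With a constant nr the compiler can fold the mask and byte offset, so the locked read-modify-write touches a single byte instead of going through the bit-test-and-complement encoding.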