Diffstat (limited to 'include/asm-x86_64')
35 files changed, 224 insertions, 233 deletions
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index 16ec82e16b2..6c5d5ca8383 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -109,9 +109,10 @@ extern unsigned int nmi_watchdog;
 #define NMI_LOCAL_APIC	2
 #define NMI_INVALID	3
 
+extern int disable_timer_pin_1;
+
 #endif /* CONFIG_X86_LOCAL_APIC */
 
-#define esr_disable 0
 extern unsigned boot_cpu_id;
 
 #endif /* __ASM_APIC_H */
diff --git a/include/asm-x86_64/apicdef.h b/include/asm-x86_64/apicdef.h
index 9388062c4f6..fb1c99ac669 100644
--- a/include/asm-x86_64/apicdef.h
+++ b/include/asm-x86_64/apicdef.h
@@ -113,6 +113,7 @@
 #define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
 
 #define MAX_IO_APICS 128
+#define MAX_LOCAL_APIC 256
 
 /*
  * All x86-64 systems are xAPIC compatible.
diff --git a/include/asm-x86_64/bug.h b/include/asm-x86_64/bug.h
index eed78566728..80ac1fe966a 100644
--- a/include/asm-x86_64/bug.h
+++ b/include/asm-x86_64/bug.h
@@ -9,10 +9,8 @@
  */
 struct bug_frame {
 	unsigned char ud2[2];
-	unsigned char mov;
-	/* should use 32bit offset instead, but the assembler doesn't
-	   like it */
-	char *filename;
+	unsigned char push;
+	signed int filename;
 	unsigned char ret;
 	unsigned short line;
 } __attribute__((packed));
@@ -25,8 +23,8 @@ struct bug_frame {
    The magic numbers generate mov $64bitimm,%eax ; ret $offset. */
 #define BUG() \
 	asm volatile( \
-	"ud2 ; .byte 0xa3 ; .quad %c1 ; .byte 0xc2 ; .short %c0" :: \
-	"i"(__LINE__), "i" (__stringify(__FILE__)))
+	"ud2 ; pushq $%c1 ; ret $%c0" :: \
+	"i"(__LINE__), "i" (__FILE__))
 void out_of_line_bug(void);
 #else
 static inline void out_of_line_bug(void) { }
diff --git a/include/asm-x86_64/calling.h b/include/asm-x86_64/calling.h
index 0bc12655fa5..fc2c5a6c262 100644
--- a/include/asm-x86_64/calling.h
+++ b/include/asm-x86_64/calling.h
@@ -65,27 +65,36 @@
 	.if \skipr11
 	.else
 	movq (%rsp),%r11
+	CFI_RESTORE r11
 	.endif
 	.if \skipr8910
 	.else
 	movq 1*8(%rsp),%r10
+	CFI_RESTORE r10
 	movq 2*8(%rsp),%r9
+	CFI_RESTORE r9
 	movq 3*8(%rsp),%r8
+	CFI_RESTORE r8
 	.endif
 	.if \skiprax
 	.else
 	movq 4*8(%rsp),%rax
+	CFI_RESTORE rax
 	.endif
 	.if \skiprcx
 	.else
 	movq 5*8(%rsp),%rcx
+	CFI_RESTORE rcx
 	.endif
 	.if \skiprdx
 	.else
 	movq 6*8(%rsp),%rdx
+	CFI_RESTORE rdx
 	.endif
 	movq 7*8(%rsp),%rsi
+	CFI_RESTORE rsi
 	movq 8*8(%rsp),%rdi
+	CFI_RESTORE rdi
 	.if ARG_SKIP+\addskip > 0
 	addq $ARG_SKIP+\addskip,%rsp
 	CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
@@ -124,11 +133,17 @@
 
 	.macro RESTORE_REST
 	movq (%rsp),%r15
+	CFI_RESTORE r15
 	movq 1*8(%rsp),%r14
+	CFI_RESTORE r14
 	movq 2*8(%rsp),%r13
+	CFI_RESTORE r13
 	movq 3*8(%rsp),%r12
+	CFI_RESTORE r12
 	movq 4*8(%rsp),%rbp
+	CFI_RESTORE rbp
 	movq 5*8(%rsp),%rbx
+	CFI_RESTORE rbx
 	addq $REST_SKIP,%rsp
 	CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
 	.endm
@@ -146,11 +161,3 @@
 	.macro icebp
 	.byte 0xf1
 	.endm
-
-#ifdef CONFIG_FRAME_POINTER
-#define ENTER enter
-#define LEAVE leave
-#else
-#define ENTER
-#define LEAVE
-#endif
diff --git a/include/asm-x86_64/current.h b/include/asm-x86_64/current.h
index 7db560ee6f7..bc8adecee66 100644
--- a/include/asm-x86_64/current.h
+++ b/include/asm-x86_64/current.h
@@ -17,7 +17,7 @@ static inline struct task_struct *get_current(void)
 #else
 
 #ifndef ASM_OFFSET_H
-#include <asm/offset.h>
+#include <asm/asm-offsets.h>
 #endif
 
 #define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
index c89b58bebee..594e610f4a1 100644
--- a/include/asm-x86_64/desc.h
+++ b/include/asm-x86_64/desc.h
@@ -191,7 +191,7 @@ static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
 /*
  * load one particular LDT into the current CPU
  */
-extern inline void load_LDT_nolock (mm_context_t *pc, int cpu)
+static inline void load_LDT_nolock (mm_context_t *pc, int cpu)
 {
 	int count = pc->size;
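A note on the bug.h change above: the old BUG() hid a 64-bit file-name pointer inside a fake mov, while the new one encodes ud2 / push imm32 / ret imm16, so each bug frame shrinks to 10 bytes and the trap handler can recover file and line straight from the instruction stream (the pushq immediate is the sign-extended 32-bit address of the file-name string, the ret immediate is the line number). A minimal userspace sketch of decoding that layout -- illustrative only, not part of the patch, and the sample values are made up:

#include <stdio.h>

struct bug_frame {
	unsigned char ud2[2];   /* 0x0f 0x0b */
	unsigned char push;     /* 0x68 = push imm32 */
	signed int filename;    /* sign-extended 32-bit string address */
	unsigned char ret;      /* 0xc2 = ret imm16 */
	unsigned short line;    /* line number rides in the ret operand */
} __attribute__((packed));

static void decode_bug_frame(const struct bug_frame *f)
{
	if (f->ud2[0] != 0x0f || f->ud2[1] != 0x0b || f->push != 0x68)
		return;
	/* In the kernel the sign extension lands in the upper mapping. */
	printf("BUG at %lx:%u\n",
	       (unsigned long)(long)f->filename, (unsigned)f->line);
}

int main(void)
{
	struct bug_frame f = { {0x0f, 0x0b}, 0x68, -0x2000, 0xc2, 42 };
	decode_bug_frame(&f);
	return 0;
}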
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index a416dc31634..e784fdc524f 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -85,6 +85,11 @@ static inline void dma_sync_single_for_device(struct device *hwdev,
 	flush_write_buffers();
 }
 
+#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \
+	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
+#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
+	dma_sync_single_for_device(dev, dma_handle, size, dir)
+
 static inline void dma_sync_sg_for_cpu(struct device *hwdev,
 				       struct scatterlist *sg,
 				       int nelems, int direction)
diff --git a/include/asm-x86_64/dwarf2.h b/include/asm-x86_64/dwarf2.h
index afd4212e860..582757fc036 100644
--- a/include/asm-x86_64/dwarf2.h
+++ b/include/asm-x86_64/dwarf2.h
@@ -24,6 +24,10 @@
 #define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
 #define CFI_OFFSET .cfi_offset
 #define CFI_REL_OFFSET .cfi_rel_offset
+#define CFI_REGISTER .cfi_register
+#define CFI_RESTORE .cfi_restore
+#define CFI_REMEMBER_STATE .cfi_remember_state
+#define CFI_RESTORE_STATE .cfi_restore_state
 
 #else
 
@@ -36,6 +40,10 @@
 #define CFI_ADJUST_CFA_OFFSET #
 #define CFI_OFFSET #
 #define CFI_REL_OFFSET #
+#define CFI_REGISTER #
+#define CFI_RESTORE #
+#define CFI_REMEMBER_STATE #
+#define CFI_RESTORE_STATE #
 
 #endif
diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h
index cf8b16cbe8d..a582cfcf223 100644
--- a/include/asm-x86_64/fixmap.h
+++ b/include/asm-x86_64/fixmap.h
@@ -76,7 +76,7 @@ extern void __this_fixmap_does_not_exist(void);
  * directly without translation, we catch the bug with a NULL-dereference
  * kernel oops. Illegal ranges of incoming indices are caught too.
  */
-extern inline unsigned long fix_to_virt(const unsigned int idx)
+static inline unsigned long fix_to_virt(const unsigned int idx)
 {
 	/*
 	 * this branch gets completely eliminated after inlining,
diff --git a/include/asm-x86_64/hardirq.h b/include/asm-x86_64/hardirq.h
index 27c381fa1c9..8661b476fb4 100644
--- a/include/asm-x86_64/hardirq.h
+++ b/include/asm-x86_64/hardirq.h
@@ -9,11 +9,12 @@
 
 #define __ARCH_IRQ_STAT 1
 
-/* Generate a lvalue for a pda member. Should fix softirq.c instead to use
-   special access macros. This would generate better code. */
-#define __IRQ_STAT(cpu,member) (read_pda(me)->member)
+#define local_softirq_pending() read_pda(__softirq_pending)
 
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+#define __ARCH_SET_SOFTIRQ_PENDING 1
+
+#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
+#define or_softirq_pending(x)  or_pda(__softirq_pending, (x))
 
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
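The dma-mapping.h hunk above makes the range variants simple forwarders: on x86-64 the offset argument is deliberately dropped, because the non-range sync is already either a no-op or a whole-buffer write-buffer flush. A standalone model of that forwarding pattern -- the mock sync_for_cpu below stands in for the real dma_sync_single_for_cpu and is purely illustrative:

#include <stdio.h>

static void sync_for_cpu(int dev, unsigned long handle,
			 unsigned long size, int dir)
{
	printf("sync_for_cpu dev=%d handle=%#lx size=%lu dir=%d\n",
	       dev, handle, size, dir);
}

/* offset is intentionally unused, matching the patch. */
#define dma_sync_single_range_for_cpu(dev, handle, offset, size, dir) \
	sync_for_cpu(dev, handle, size, dir)

int main(void)
{
	/* A driver asks to sync bytes [512, 768); x86-64 syncs the lot. */
	dma_sync_single_range_for_cpu(0, 0x1000UL, 512, 256UL, 1);
	return 0;
}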
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index 2b5cb2865d2..dc97668ea0f 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -26,6 +26,7 @@ struct hw_interrupt_type;
 #endif
 
+#define NMI_VECTOR	0x02
 /*
  * IDT vectors usable for external interrupt sources start
  * at 0x20:
@@ -50,14 +51,15 @@ struct hw_interrupt_type;
  */
 #define SPURIOUS_APIC_VECTOR	0xff
 #define ERROR_APIC_VECTOR	0xfe
-#define INVALIDATE_TLB_VECTOR	0xfd
-#define RESCHEDULE_VECTOR	0xfc
-#define TASK_MIGRATION_VECTOR	0xfb
-#define CALL_FUNCTION_VECTOR	0xfa
-#define KDB_VECTOR	0xf9
-
-#define THERMAL_APIC_VECTOR	0xf0
-
+#define RESCHEDULE_VECTOR	0xfd
+#define CALL_FUNCTION_VECTOR	0xfc
+#define KDB_VECTOR	0xfb	/* reserved for KDB */
+#define THERMAL_APIC_VECTOR	0xfa
+/* 0xf9 free */
+#define INVALIDATE_TLB_VECTOR_END	0xf8
+#define INVALIDATE_TLB_VECTOR_START	0xf0	/* f0-f8 used for TLB flush */
+
+#define NUM_INVALIDATE_TLB_VECTORS	8
 
 /*
  * Local APIC timer IRQ vector is on a different priority level,
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
index 37fc3f149a5..52ff269fe05 100644
--- a/include/asm-x86_64/io.h
+++ b/include/asm-x86_64/io.h
@@ -48,7 +48,7 @@
  * Talk about misusing macros..
  */
 #define __OUT1(s,x) \
-extern inline void out##s(unsigned x value, unsigned short port) {
+static inline void out##s(unsigned x value, unsigned short port) {
 
 #define __OUT2(s,s1,s2) \
 __asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
@@ -58,7 +58,7 @@ __OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
 __OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
 
 #define __IN1(s) \
-extern inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
+static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
 
 #define __IN2(s,s1,s2) \
 __asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
@@ -68,12 +68,12 @@ __IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
 __IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
 
 #define __INS(s) \
-extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
+static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
 { __asm__ __volatile__ ("rep ; ins" #s \
 : "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
 
 #define __OUTS(s) \
-extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
+static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
 { __asm__ __volatile__ ("rep ; outs" #s \
 : "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
 
@@ -110,12 +110,12 @@ __OUTS(l)
  * Change virtual addresses to physical addresses and vv.
  * These are pretty trivial
  */
-extern inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile void * address)
 {
 	return __pa(address);
 }
 
-extern inline void * phys_to_virt(unsigned long address)
+static inline void * phys_to_virt(unsigned long address)
 {
 	return __va(address);
 }
@@ -130,7 +130,7 @@ extern inline void * phys_to_virt(unsigned long address)
 
 extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
 
-extern inline void __iomem * ioremap (unsigned long offset, unsigned long size)
+static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
 {
 	return __ioremap(offset, size, 0);
 }
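The hw_irq.h hunk above replaces the single INVALIDATE_TLB_VECTOR with a block of eight vectors (0xf0-0xf8), so several TLB-flush IPIs can be in flight without serializing on one handler. A runnable sketch of how a sender index might be folded into that range -- the tlb_flush_vector() helper is hypothetical, for illustration only:

#include <stdio.h>

#define INVALIDATE_TLB_VECTOR_START 0xf0
#define NUM_INVALIDATE_TLB_VECTORS  8

/* Hypothetical: map a sender to one vector in the f0-f8 block. */
static int tlb_flush_vector(int sender)
{
	return INVALIDATE_TLB_VECTOR_START +
	       (sender % NUM_INVALIDATE_TLB_VECTORS);
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < 10; cpu++)
		printf("cpu %d -> vector 0x%x\n", cpu, tlb_flush_vector(cpu));
	return 0;
}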
diff --git a/include/asm-x86_64/ipi.h b/include/asm-x86_64/ipi.h
index 5e166b9d3bd..022e9d340ad 100644
--- a/include/asm-x86_64/ipi.h
+++ b/include/asm-x86_64/ipi.h
@@ -31,9 +31,20 @@
 
 static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, unsigned int dest)
 {
-	unsigned int icr = APIC_DM_FIXED | shortcut | vector | dest;
-	if (vector == KDB_VECTOR)
-		icr = (icr & (~APIC_VECTOR_MASK)) | APIC_DM_NMI;
+	unsigned int icr = shortcut | dest;
+
+	switch (vector) {
+	default:
+		icr |= APIC_DM_FIXED | vector;
+		break;
+	case NMI_VECTOR:
+		/*
+		 * Setup KDB IPI to be delivered as an NMI
+		 */
+	case KDB_VECTOR:
+		icr |= APIC_DM_NMI;
+		break;
+	}
 	return icr;
 }
 
@@ -66,7 +77,7 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, unsign
 	/*
 	 * Send the IPI. The write to APIC_ICR fires this off.
 	 */
-	apic_write_around(APIC_ICR, cfg);
+	apic_write(APIC_ICR, cfg);
 }
 
@@ -92,7 +103,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
 		 * prepare target chip field
 		 */
 		cfg = __prepare_ICR2(x86_cpu_to_apicid[query_cpu]);
-		apic_write_around(APIC_ICR2, cfg);
+		apic_write(APIC_ICR2, cfg);
 
 		/*
 		 * program the ICR
@@ -102,7 +113,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
 		/*
 		 * Send the IPI. The write to APIC_ICR fires this off.
 		 */
-		apic_write_around(APIC_ICR, cfg);
+		apic_write(APIC_ICR, cfg);
 	}
 	local_irq_restore(flags);
 }
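The __prepare_ICR rewrite above builds the delivery mode into the ICR up front instead of patching it afterwards: NMI_VECTOR and KDB_VECTOR share one APIC_DM_NMI branch, everything else gets APIC_DM_FIXED plus the vector. A standalone reproduction of that logic, compilable in userspace -- the APIC_DM_* values match the usual apicdef.h encodings, but the function here is only an illustration:

#include <stdio.h>

#define APIC_DM_FIXED 0x00000
#define APIC_DM_NMI   0x00400
#define NMI_VECTOR    0x02
#define KDB_VECTOR    0xfb

static unsigned int prepare_icr(unsigned int shortcut, int vector,
				unsigned int dest)
{
	unsigned int icr = shortcut | dest;

	switch (vector) {
	default:
		icr |= APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:	/* KDB IPIs are delivered as NMIs too */
	case KDB_VECTOR:
		icr |= APIC_DM_NMI;
		break;
	}
	return icr;
}

int main(void)
{
	printf("reschedule: %#x\n", prepare_icr(0, 0xfd, 0));
	printf("nmi:        %#x\n", prepare_icr(0, NMI_VECTOR, 0));
	return 0;
}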
diff --git a/include/asm-x86_64/irq.h b/include/asm-x86_64/irq.h
index 4482657777b..fb724ba37ae 100644
--- a/include/asm-x86_64/irq.h
+++ b/include/asm-x86_64/irq.h
@@ -48,10 +48,6 @@ static __inline__ int irq_canonicalize(int irq)
 #define ARCH_HAS_NMI_WATCHDOG	/* See include/linux/nmi.h */
 #endif
 
-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
 extern void fixup_irqs(cpumask_t map);
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index b90341994d8..f604e84c530 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -46,7 +46,7 @@ extern void die(const char *,struct pt_regs *,long);
 extern void __die(const char *,struct pt_regs *,long);
 extern void show_registers(struct pt_regs *regs);
 extern void dump_pagetable(unsigned long);
-extern void oops_begin(void);
-extern void oops_end(void);
+extern unsigned long oops_begin(void);
+extern void oops_end(unsigned long);
 
 #endif
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index c954f15c1a7..3e72c41727c 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -29,7 +29,7 @@ static __inline__ void local_dec(local_t *v)
 		:"m" (v->counter));
 }
 
-static __inline__ void local_add(unsigned long i, local_t *v)
+static __inline__ void local_add(unsigned int i, local_t *v)
 {
 	__asm__ __volatile__(
 		"addl %1,%0"
@@ -37,7 +37,7 @@ static __inline__ void local_add(unsigned long i, local_t *v)
 		:"ir" (i), "m" (v->counter));
 }
 
-static __inline__ void local_sub(unsigned long i, local_t *v)
+static __inline__ void local_sub(unsigned int i, local_t *v)
 {
 	__asm__ __volatile__(
 		"subl %1,%0"
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index 768413751b3..b40c661f111 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -12,7 +12,7 @@
 
 #include <asm/smp.h>
 
-#define NODEMAPSIZE 0xff
+#define NODEMAPSIZE 0xfff
 
 /* Simple perfect hash to map physical addresses to node numbers */
 extern int memnode_shift;
@@ -54,7 +54,7 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
 #define pfn_valid(pfn) ((pfn) >= num_physpages ? 0 : \
 			({ u8 nid__ = pfn_to_nid(pfn); \
-			   nid__ != 0xff && (pfn) >= node_start_pfn(nid__) && (pfn) <= node_end_pfn(nid__); }))
+			   nid__ != 0xff && (pfn) >= node_start_pfn(nid__) && (pfn) < node_end_pfn(nid__); }))
 #endif
 
 #define local_mapnr(kvaddr) \
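The kdebug.h hunk above changes the oops bracketing so oops_begin() hands its saved interrupt flags back to the caller and oops_end() restores them, rather than the pair sharing implicit state. A tiny standalone mock of the new calling convention -- names with _mock suffixes are invented for this sketch:

#include <stdio.h>

static unsigned long oops_begin_mock(void)
{
	unsigned long flags = 0x246;	/* pretend: RFLAGS saved at entry */
	printf("oops: irqs disabled, flags saved\n");
	return flags;
}

static void oops_end_mock(unsigned long flags)
{
	printf("oops: restoring flags %#lx\n", flags);
}

int main(void)
{
	unsigned long flags = oops_begin_mock();
	/* ... a real fault handler would print registers here ... */
	oops_end_mock(flags);
	return 0;
}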
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index ba15279a79d..4d727f3f555 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -29,22 +29,37 @@
 #define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
 
 /* wrmsr with exception handling */
-#define wrmsr_safe(msr,a,b) ({ int ret__; \
-	asm volatile("2: wrmsr ; xorl %0,%0\n" \
-	"1:\n\t" \
-	".section .fixup,\"ax\"\n\t" \
-	"3: movl %4,%0 ; jmp 1b\n\t" \
-	".previous\n\t" \
-	".section __ex_table,\"a\"\n" \
-	" .align 8\n\t" \
-	" .quad 2b,3b\n\t" \
-	".previous" \
-	: "=a" (ret__) \
-	: "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
+#define wrmsr_safe(msr,a,b) ({ int ret__;			\
+	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
+		     "1:\n\t"					\
+		     ".section .fixup,\"ax\"\n\t"		\
+		     "3: movl %4,%0 ; jmp 1b\n\t"		\
+		     ".previous\n\t"				\
+		     ".section __ex_table,\"a\"\n"		\
+		     " .align 8\n\t"				\
+		     " .quad 2b,3b\n\t"				\
+		     ".previous"				\
+		     : "=a" (ret__)				\
+		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
 	ret__; })
 
 #define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
 
+#define rdmsr_safe(msr,a,b) \
+	({ int ret__;						\
+	  asm volatile ("1: rdmsr\n"				\
+			"2:\n"					\
+			".section .fixup,\"ax\"\n"		\
+			"3: movl %4,%0\n"			\
+			" jmp 2b\n"				\
+			".previous\n"				\
+			".section __ex_table,\"a\"\n"		\
+			" .align 8\n"				\
+			" .quad 1b,3b\n"			\
+			".previous":"=&bDS" (ret__), "=a"(a), "=d"(b)\
+			:"c"(msr), "i"(-EIO), "0"(0));		\
+	  ret__; })
+
 #define rdtsc(low,high) \
      __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
 
@@ -64,7 +79,7 @@
 		: "=a" (low), "=d" (high) \
 		: "c" (counter))
 
-extern inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
+static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
 			 unsigned int *ecx, unsigned int *edx)
 {
 	__asm__("cpuid"
@@ -90,7 +105,7 @@ static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
 /*
  * CPUID functions returning a single datum
  */
-extern inline unsigned int cpuid_eax(unsigned int op)
+static inline unsigned int cpuid_eax(unsigned int op)
 {
 	unsigned int eax;
 
@@ -100,7 +115,7 @@ extern inline unsigned int cpuid_eax(unsigned int op)
 		: "bx", "cx", "dx");
 	return eax;
 }
-extern inline unsigned int cpuid_ebx(unsigned int op)
+static inline unsigned int cpuid_ebx(unsigned int op)
 {
 	unsigned int eax, ebx;
 
@@ -110,7 +125,7 @@ extern inline unsigned int cpuid_ebx(unsigned int op)
 		: "cx", "dx" );
 	return ebx;
}
-extern inline unsigned int cpuid_ecx(unsigned int op)
+static inline unsigned int cpuid_ecx(unsigned int op)
 {
 	unsigned int eax, ecx;
 
@@ -120,7 +135,7 @@ extern inline unsigned int cpuid_ecx(unsigned int op)
 		: "bx", "dx" );
 	return ecx;
 }
-extern inline unsigned int cpuid_edx(unsigned int op)
+static inline unsigned int cpuid_edx(unsigned int op)
 {
 	unsigned int eax, edx;
 
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index 5c363a1482e..bcf55c3f7f7 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -9,6 +9,7 @@ struct node { };
 
 extern int compute_hash_shift(struct node *nodes, int numnodes);
+extern int pxm_to_node(int nid);
 
 #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
 
@@ -16,6 +17,8 @@ extern void numa_add_cpu(int cpu);
 extern void numa_init_array(void);
 extern int numa_off;
 
+extern unsigned char apicid_to_node[256];
+
 #define NUMA_NO_NODE 0xff
 
 #endif
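The new rdmsr_safe() above pairs with the existing wrmsr_safe(): the exception-table entry turns a faulting RDMSR into a nonzero return instead of an oops, so probing code can back off on CPUs that lack an MSR. A kernel-context usage fragment -- this is a hedged sketch, not runnable in userspace, and the MSR number is only an example:

	u32 lo, hi;

	/* 0xc0010015 is an example AMD MSR; any probe works the same way. */
	if (rdmsr_safe(0xc0010015, lo, hi))
		printk("MSR not readable on this CPU, falling back\n");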
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 135ffaa0393..e5ab4d231f2 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -32,6 +32,8 @@
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
+extern unsigned long end_pfn;
+
 void clear_page(void *);
 void copy_page(void *, void *);
 
@@ -111,7 +113,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #ifdef CONFIG_FLATMEM
 #define pfn_to_page(pfn)	(mem_map + (pfn))
 #define page_to_pfn(page)	((unsigned long)((page) - mem_map))
-#define pfn_valid(pfn)		((pfn) < max_mapnr)
+#define pfn_valid(pfn)		((pfn) < end_pfn)
 #endif
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
index eeb3088a1c9..5a82a6762c2 100644
--- a/include/asm-x86_64/pci.h
+++ b/include/asm-x86_64/pci.h
@@ -50,10 +50,10 @@ extern int iommu_setup(char *opt);
  * address space. The networking and block device layers use
  * this boolean for bounce buffer decisions
  *
- * On AMD64 it mostly equals, but we set it to zero to tell some subsystems
- * that an IOMMU is available.
+ * On x86-64 it mostly equals, but we set it to zero to tell some subsystems
+ * that a hard or soft IOMMU is available.
  */
-#define PCI_DMA_BUS_IS_PHYS	(no_iommu ? 1 : 0)
+#define PCI_DMA_BUS_IS_PHYS	0
 
 /*
  * x86-64 always supports DAC, but sometimes it is useful to force
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
index 36b766cfc4d..bbf89aa8a1a 100644
--- a/include/asm-x86_64/pda.h
+++ b/include/asm-x86_64/pda.h
@@ -10,10 +10,8 @@ struct x8664_pda {
 	struct task_struct *pcurrent;	/* Current process */
 	unsigned long data_offset;	/* Per cpu data offset from linker address */
-	struct x8664_pda *me;		/* Pointer to itself */
 	unsigned long kernelstack;	/* top of kernel stack for current */
 	unsigned long oldrsp;		/* user rsp for system call */
-	unsigned long irqrsp;		/* Old rsp for interrupts. */
 	int irqcount;			/* Irq nesting counter. Starts with -1 */
 	int cpunumber;			/* Logical CPU number */
 	char *irqstackptr;		/* top of irqstack */
@@ -22,7 +20,7 @@ struct x8664_pda {
 	struct mm_struct *active_mm;
 	int mmu_state;
 	unsigned apic_timer_irqs;
-} ____cacheline_aligned;
+} ____cacheline_aligned_in_smp;
 
 #define IRQSTACK_ORDER 2
 
@@ -42,13 +40,14 @@ extern void __bad_pda_field(void);
 #define pda_offset(field) offsetof(struct x8664_pda, field)
 
 #define pda_to_op(op,field,val) do { \
+	typedef typeof_field(struct x8664_pda, field) T__; \
       switch (sizeof_field(struct x8664_pda, field)) { \
case 2: \
-asm volatile(op "w %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+asm volatile(op "w %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
case 4: \
-asm volatile(op "l %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+asm volatile(op "l %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
case 8: \
-asm volatile(op "q %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \
+asm volatile(op "q %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \
       default: __bad_pda_field(); \
       } \
       } while (0)
@@ -58,7 +57,7 @@ asm volatile(op "q %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); bre
  * Unfortunately removing them causes all hell to break loose currently.
 */
 #define pda_from_op(op,field) ({ \
-       typedef typeof_field(struct x8664_pda, field) T__; T__ ret__; \
+       typeof_field(struct x8664_pda, field) ret__; \
       switch (sizeof_field(struct x8664_pda, field)) { \
case 2: \
asm volatile(op "w %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\
@@ -75,6 +74,7 @@ asm volatile(op "q %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); b
 #define write_pda(field,val) pda_to_op("mov",field,val)
 #define add_pda(field,val) pda_to_op("add",field,val)
 #define sub_pda(field,val) pda_to_op("sub",field,val)
+#define or_pda(field,val) pda_to_op("or",field,val)
 
 #endif
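The pda.h change above does two things worth unpacking: pda_to_op() now casts `val` to the field's own type before the asm, so a wide constant written to a narrow field is truncated deliberately and visibly, and the "ri" constraint lets constants become immediates instead of forcing a register. A standalone model of the typeof-based cast trick, compilable with GCC -- struct pda and pda_set() here are simplified stand-ins, not the kernel macros:

#include <stdio.h>

struct pda { unsigned short a; unsigned int b; };

#define typeof_field(t, f)	typeof(((t *)0)->f)

/* Simplified: the real macro emits a %gs-relative asm store instead. */
#define pda_set(pda, f, v) do {				\
	typedef typeof_field(struct pda, f) T__;	\
	(pda)->f = (T__)(v);				\
} while (0)

int main(void)
{
	struct pda p;

	pda_set(&p, a, 0x12345678UL);	/* stored as 0x5678 */
	printf("%#x\n", (unsigned)p.a);
	return 0;
}

The new or_pda() at the end is what makes the hardirq.h or_softirq_pending() macro earlier in this patch possible.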
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
index deadd146978..08cad2482bc 100644
--- a/include/asm-x86_64/pgalloc.h
+++ b/include/asm-x86_64/pgalloc.h
@@ -18,12 +18,12 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
 	set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
 }
 
-extern __inline__ pmd_t *get_pmd(void)
+static inline pmd_t *get_pmd(void)
 {
 	return (pmd_t *)get_zeroed_page(GFP_KERNEL);
 }
 
-extern __inline__ void pmd_free(pmd_t *pmd)
+static inline void pmd_free(pmd_t *pmd)
 {
 	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
 	free_page((unsigned long)pmd);
@@ -86,13 +86,13 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
 /* Should really implement gc for free page table pages. This could be
    done with a reference count in struct page. */
 
-extern __inline__ void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(pte_t *pte)
 {
 	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
 	free_page((unsigned long)pte);
 }
 
-extern inline void pte_free(struct page *pte)
+static inline void pte_free(struct page *pte)
 {
 	__free_page(pte);
 }
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 5e0f2fdab0d..1dc110ba82d 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -85,7 +85,7 @@ static inline void set_pud(pud_t *dst, pud_t val)
 	pud_val(*dst) = pud_val(val);
 }
 
-extern inline void pud_clear (pud_t *pud)
+static inline void pud_clear (pud_t *pud)
 {
 	set_pud(pud, __pud(0));
 }
@@ -95,7 +95,7 @@ static inline void set_pgd(pgd_t *dst, pgd_t val)
 	pgd_val(*dst) = pgd_val(val);
 }
 
-extern inline void pgd_clear (pgd_t * pgd)
+static inline void pgd_clear (pgd_t * pgd)
 {
 	set_pgd(pgd, __pgd(0));
 }
@@ -375,7 +375,7 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 }
 
 /* Change flags of a PTE */
-extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	pte_val(pte) &= _PAGE_CHG_MASK;
 	pte_val(pte) |= pgprot_val(newprot);
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index a8321999448..03837d34fba 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -254,7 +254,13 @@ struct thread_struct {
 	u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
 } __attribute__((aligned(16)));
 
-#define INIT_THREAD {}
+#define INIT_THREAD { \
+	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+}
+
+#define INIT_TSS { \
+	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+}
 
 #define INIT_MMAP \
 { &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
@@ -375,13 +381,13 @@ struct extended_sigtable {
 #define ASM_NOP_MAX 8
 
 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-extern inline void rep_nop(void)
+static inline void rep_nop(void)
 {
 	__asm__ __volatile__("rep;nop": : :"memory");
 }
 
 /* Stop speculative execution */
-extern inline void sync_core(void)
+static inline void sync_core(void)
 {
 	int tmp;
 	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
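On the processor.h hunk above: INIT_THREAD and INIT_TSS now seed rsp0 with the top of init_stack rather than leaving it zero, because x86 stacks grow downward and the hardware loads rsp0 on ring transitions before any scheduler code has run. A standalone model of the same address computation -- the init_stack array below is a stand-in for the kernel's, sized arbitrarily for illustration:

#include <stdio.h>

static unsigned char init_stack[8192];	/* stand-in for the kernel's */

int main(void)
{
	/* One past the end of the array: the first push lands inside it. */
	unsigned long rsp0 =
		(unsigned long)&init_stack + sizeof(init_stack);

	printf("stack base %p, rsp0 = %#lx\n", (void *)init_stack, rsp0);
	return 0;
}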
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index 6c813eb521f..dbb37b0adb4 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -8,7 +8,6 @@
 struct cpuinfo_x86;
 struct pt_regs;
 
-extern void get_cpu_vendor(struct cpuinfo_x86*);
 extern void start_kernel(void);
 extern void pda_init(int);
 
@@ -75,9 +74,6 @@ extern void acpi_reserve_bootmem(void);
 
 extern void swap_low_mappings(void);
 
-extern void oops_begin(void);
-extern void die(const char *,struct pt_regs *,long);
-extern void __die(const char * str, struct pt_regs * regs, long err);
 extern void __show_regs(struct pt_regs * regs);
 extern void show_regs(struct pt_regs * regs);
 
@@ -94,8 +90,6 @@ extern int unhandled_signal(struct task_struct *tsk, int sig);
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 extern void swiotlb_init(void);
 
-extern unsigned long max_mapnr;
-extern unsigned long end_pfn;
 extern unsigned long table_start, table_end;
 
 extern int exception_trace;
diff --git a/include/asm-x86_64/signal.h b/include/asm-x86_64/signal.h
index fe9b96d9481..f8d55798535 100644
--- a/include/asm-x86_64/signal.h
+++ b/include/asm-x86_64/signal.h
@@ -143,23 +143,23 @@ typedef struct sigaltstack {
 #undef __HAVE_ARCH_SIG_BITOPS
 #if 0
 
-extern __inline__ void sigaddset(sigset_t *set, int _sig)
+static inline void sigaddset(sigset_t *set, int _sig)
 {
 	__asm__("btsq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
 }
 
-extern __inline__ void sigdelset(sigset_t *set, int _sig)
+static inline void sigdelset(sigset_t *set, int _sig)
 {
 	__asm__("btrq %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
 }
 
-extern __inline__ int __const_sigismember(sigset_t *set, int _sig)
+static inline int __const_sigismember(sigset_t *set, int _sig)
 {
 	unsigned long sig = _sig - 1;
 	return 1 & (set->sig[sig / _NSIG_BPW] >> (sig & ~(_NSIG_BPW-1)));
 }
 
-extern __inline__ int __gen_sigismember(sigset_t *set, int _sig)
+static inline int __gen_sigismember(sigset_t *set, int _sig)
 {
 	int ret;
 	__asm__("btq %2,%1\n\tsbbq %0,%0"
@@ -172,7 +172,7 @@ extern __inline__ int __gen_sigismember(sigset_t *set, int _sig)
 		__const_sigismember((set),(sig)) : \
 		__gen_sigismember((set),(sig)))
 
-extern __inline__ int sigfindinword(unsigned long word)
+static inline int sigfindinword(unsigned long word)
 {
 	__asm__("bsfq %1,%0" : "=r"(word) : "rm"(word) : "cc");
 	return word;
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index de8b57b2b62..24e32611f0b 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -72,7 +72,7 @@ static inline int num_booting_cpus(void)
 
 #define raw_smp_processor_id() read_pda(cpunumber)
 
-extern __inline int hard_smp_processor_id(void)
+static inline int hard_smp_processor_id(void)
 {
 	/* we don't want to mark this access volatile - bad code generation */
 	return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
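The `extern inline` to `static inline` conversions that run through this whole patch deserve a word: under GNU C89 semantics, `extern inline` never emits an out-of-line body, so any call the compiler declines to inline becomes an unresolved symbol at link time, while `static inline` always keeps a per-object fallback. A minimal compilable illustration of the safe form:

/* Compiles and links regardless of inlining decisions: the static
   inline definition provides a local body if the call is not inlined. */
static inline int add_one(int x)
{
	return x + 1;
}

int caller(int x)
{
	return add_one(x);
}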
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 5aeb57a3baa..69636831ad2 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -6,47 +6,21 @@
 #include <asm/page.h>
 #include <linux/config.h>
 
-extern int printk(const char * fmt, ...)
-	__attribute__ ((format (printf, 1, 2)));
-
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-	unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPINLOCK_MAGIC	0xdead4ead
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
-#else
-#define SPINLOCK_MAGIC_INIT	/* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
-
-#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-/*
+ *
  * Simple spin lock operations. There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
  * We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
-#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_is_locked(x) \
+		(*(volatile signed char *)(&(x)->slock) <= 0)
 
-#define spin_lock_string \
+#define __raw_spin_lock_string \
 	"\n1:\t" \
 	"lock ; decb %0\n\t" \
 	"js 2f\n" \
@@ -58,74 +32,40 @@ typedef struct {
 	"jmp 1b\n" \
 	LOCK_SECTION_END
 
-/*
- * This works. Despite all the confusion.
- * (except on PPro SMP or if we are using OOSTORE)
- * (PPro errata 66, 92)
- */
-
-#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
-
-#define spin_unlock_string \
+#define __raw_spin_unlock_string \
 	"movb $1,%0" \
-		:"=m" (lock->lock) : : "memory"
-
-
-static inline void _raw_spin_unlock(spinlock_t *lock)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
-	BUG_ON(lock->magic != SPINLOCK_MAGIC);
-	assert_spin_locked(lock);
-#endif
-	__asm__ __volatile__(
-		spin_unlock_string
-	);
-}
-
-#else
-
-#define spin_unlock_string \
-	"xchgb %b0, %1" \
-		:"=q" (oldval), "=m" (lock->lock) \
-		:"0" (oldval) : "memory"
+		:"=m" (lock->slock) : : "memory"
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-	char oldval = 1;
-#ifdef CONFIG_DEBUG_SPINLOCK
-	BUG_ON(lock->magic != SPINLOCK_MAGIC);
-	assert_spin_locked(lock);
-#endif
 	__asm__ __volatile__(
-		spin_unlock_string
-	);
+		__raw_spin_lock_string
+		:"=m" (lock->slock) : : "memory");
 }
 
-#endif
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	char oldval;
+
 	__asm__ __volatile__(
 		"xchgb %b0,%1"
-		:"=q" (oldval), "=m" (lock->lock)
+		:"=q" (oldval), "=m" (lock->slock)
 		:"0" (0) : "memory");
+
 	return oldval > 0;
 }
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-	if (lock->magic != SPINLOCK_MAGIC) {
-		printk("eip: %p\n", __builtin_return_address(0));
-		BUG();
-	}
-#endif
 	__asm__ __volatile__(
-		spin_lock_string
-		:"=m" (lock->lock) : : "memory");
+		__raw_spin_unlock_string
	);
 }
 
+#define __raw_spin_unlock_wait(lock) \
+	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
 /*
  * Read-write spinlocks, allowing multiple readers
@@ -136,33 +76,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
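For readers following the spinlock conversion above, the lock-byte convention the asm relies on is easy to model: slock starts at 1 (unlocked), `lock ; decb` takes it to 0 for the owner and negative for contenders, so "locked" is simply the low byte read as a signed char being <= 0. A standalone x86 (little-endian) illustration -- no atomicity here, just the arithmetic:

#include <stdio.h>

int main(void)
{
	volatile unsigned int slock = 1;	/* __RAW_SPIN_LOCK_UNLOCKED */

	printf("locked? %d\n", *(volatile signed char *)&slock <= 0);
	((volatile signed char *)&slock)[0]--;	/* models lock ; decb */
	printf("locked? %d\n", *(volatile signed char *)&slock <= 0);
	return 0;
}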
- */
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-	unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC	0xdeaf1eed
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
-#else
-#define RWLOCK_MAGIC_INIT	/* */
-#endif
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
-
-#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-#define read_can_lock(x) ((int)(x)->lock > 0)
-#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
-/*
+ *
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
 *
@@ -170,29 +84,24 @@ typedef struct {
 *
 * Changed to use the same technique as rw semaphores. See
 * semaphore.h for details. -ben
+ *
+ * the helpers are in arch/i386/kernel/semaphore.c
 */
 
-/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
-
-static inline void _raw_read_lock(rwlock_t *rw)
+#define __raw_read_can_lock(x)	((int)(x)->lock > 0)
+#define __raw_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-	BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
 	__build_read_lock(rw, "__read_lock_failed");
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-#ifdef CONFIG_DEBUG_SPINLOCK
-	BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
 	__build_write_lock(rw, "__write_lock_failed");
 }
 
-#define _raw_read_unlock(rw)	asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
-
-static inline int _raw_read_trylock(rwlock_t *lock)
+static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 	atomic_dec(count);
@@ -202,7 +111,7 @@
 	return 0;
 }
 
-static inline int _raw_write_trylock(rwlock_t *lock)
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -211,4 +120,15 @@
 	return 0;
 }
 
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0"
+				: "=m" (rw->lock) : : "memory");
+}
+
 #endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-x86_64/spinlock_types.h b/include/asm-x86_64/spinlock_types.h
new file mode 100644
index 00000000000..59efe849f35
--- /dev/null
+++ b/include/asm-x86_64/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+	volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED	{ 1 }
+
+typedef struct {
+	volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
+
+#endif
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 8606e170a7d..85348e02ad2 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -188,7 +188,7 @@ static inline void write_cr4(unsigned long val)
 
 #define __xg(x) ((volatile long *)(x))
 
-extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
+static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
 {
 	*ptr = val;
 }
@@ -253,19 +253,19 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	case 2:
 		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
 				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
 				     : "memory");
 		return prev;
 	case 4:
 		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
 				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
 				     : "memory");
 		return prev;
 	case 8:
 		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
 				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
 				     : "memory");
 		return prev;
 	}
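The rwlock half of the spinlock.h conversion keeps the classic RW_LOCK_BIAS counter: readers subtract 1 and must leave the count positive, a writer subtracts the whole bias and must land exactly on 0. A standalone single-threaded model of the trylock arithmetic -- the real code uses atomics, this sketch only demonstrates the counting scheme:

#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000
static int count = RW_LOCK_BIAS;

static int read_trylock(void)
{
	if (--count >= 0)
		return 1;
	++count;		/* undo, lock is write-held */
	return 0;
}

static int write_trylock(void)
{
	count -= RW_LOCK_BIAS;
	if (count == 0)
		return 1;
	count += RW_LOCK_BIAS;	/* undo, readers or a writer present */
	return 0;
}

int main(void)
{
	printf("reader: %d\n", read_trylock());	/* 1: acquired */
	printf("writer: %d\n", write_trylock());	/* 0: reader held */
	return 0;
}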
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
index 24ecf6a637c..f971f45d6d7 100644
--- a/include/asm-x86_64/timex.h
+++ b/include/asm-x86_64/timex.h
@@ -6,7 +6,6 @@
 #ifndef _ASMx8664_TIMEX_H
 #define _ASMx8664_TIMEX_H
 
-#include <linux/config.h>
 #include <asm/8253pit.h>
 #include <asm/msr.h>
 #include <asm/vsyscall.h>
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
index 505b0cf906d..4a9c20ea9b1 100644
--- a/include/asm-x86_64/tlbflush.h
+++ b/include/asm-x86_64/tlbflush.h
@@ -109,6 +109,10 @@ static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long st
 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
 
+/* Roughly an IPI every 20MB with 4k pages for freeing page table
+   ranges. Cost is about 42k of memory for each CPU. */
+#define ARCH_FREE_PTE_NR 5350
+
 #endif
 
 #define flush_tlb_kernel_range(start, end) flush_tlb_all()
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index c1bc3fad482..1c603cd7e4d 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -13,7 +13,6 @@
 extern cpumask_t cpu_online_map;
 
 extern unsigned char cpu_to_node[];
-extern unsigned char pci_bus_to_node[];
 extern cpumask_t node_to_cpumask[];
 
 #ifdef CONFIG_ACPI_NUMA
@@ -26,7 +25,7 @@ extern int __node_distance(int, int);
 #define parent_node(node)		(node)
 #define node_to_first_cpu(node)	(__ffs(node_to_cpumask[node]))
 #define node_to_cpumask(node)		(node_to_cpumask[node])
-#define pcibus_to_node(bus)		pci_bus_to_node[(bus)->number]
+#define pcibus_to_node(bus)		((long)(bus->sysdata))
 #define pcibus_to_cpumask(bus)		node_to_cpumask(pcibus_to_node(bus));
 
 /* sched_domains SD_NODE_INIT for x86_64 machines */
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
index 2872da23fc7..438a3f52f83 100644
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -29,7 +29,6 @@ enum vsyscall_num {
 
 struct vxtime_data {
 	long hpet_address;	/* HPET base address */
-	unsigned long hz;	/* HPET clocks / sec */
 	int last;
 	unsigned long last_tsc;
 	long quot;
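Finally, the system.h hunk relaxes the __cmpxchg register constraint from "q" to "r": "q" is an i386-era constraint tied to the byte-addressable registers, while "r" states the actual requirement (any general-purpose register) and gives the allocator more freedom. A standalone equivalent of the 8-byte case, compilable with GCC on x86-64 -- reproduced here for illustration, not part of the patch:

#include <stdio.h>

static unsigned long cmpxchg_u64(volatile unsigned long *ptr,
				 unsigned long old, unsigned long new)
{
	unsigned long prev;

	/* Same pattern as the kernel's case 8, with the "r" constraint. */
	__asm__ __volatile__("lock ; cmpxchgq %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*ptr), "0"(old)
			     : "memory");
	return prev;
}

int main(void)
{
	unsigned long v = 5;

	printf("prev=%lu v=%lu\n", cmpxchg_u64(&v, 5, 9), v); /* 5, 9 */
	return 0;
}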