From 08d9d1c4d44ce43856da048cb0737ef769b61e9a Mon Sep 17 00:00:00 2001
From: David Daney
Date: Wed, 17 Dec 2008 18:19:18 -0800
Subject: MIPS: Fix preprocessor warnings flagged by GCC 4.4

Signed-off-by: David Daney
Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/elf.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index a8eac1697b3..d58f128aa74 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -232,7 +232,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
  */
 #ifdef __MIPSEB__
 #define ELF_DATA	ELFDATA2MSB
-#elif __MIPSEL__
+#elif defined(__MIPSEL__)
 #define ELF_DATA	ELFDATA2LSB
 #endif
 #define ELF_ARCH	EM_MIPS
--
cgit v1.2.3

From ed2b03ed3cec2a4719d04ef208319f9de6a4258a Mon Sep 17 00:00:00 2001
From: David Daney
Date: Wed, 17 Dec 2008 13:28:39 -0800
Subject: MIPS: MIPS64R2: Fix buggy __arch_swab64

The way the code is written, it was assuming dshd has the function of a
hypothetical dshw instruction ...

Signed-off-by: David Daney
Signed-off-by: Ralf Baechle
---
 arch/mips/include/asm/byteorder.h | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'arch')

diff --git a/arch/mips/include/asm/byteorder.h b/arch/mips/include/asm/byteorder.h
index 2988d29a086..33790b9e0cc 100644
--- a/arch/mips/include/asm/byteorder.h
+++ b/arch/mips/include/asm/byteorder.h
@@ -50,9 +50,8 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
 {
 	__asm__(
-	"	dsbh	%0, %1		\n"
-	"	dshd	%0, %0		\n"
-	"	drotr	%0, %0, 32	\n"
+	"	dsbh	%0, %1\n"
+	"	dshd	%0, %0"
 	: "=r" (x)
 	: "r" (x));
--
cgit v1.2.3
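
On MIPS64R2, dsbh swaps the two bytes inside each 16-bit halfword and dshd
reverses the order of the four halfwords, so the pair already performs a
complete 64-bit byte swap. The removed drotr by 32 would only have been
needed if dshd behaved like the hypothetical dshw the commit message
mentions; with the real instruction, the extra rotate leaves each 32-bit
half byte-swapped but the two halves in their original order. The
user-space sketch below models the two instructions in portable C purely
for illustration; model_dsbh(), model_dshd() and the sample value are
made-up names, not kernel code.

/*
 * Illustrative model of the MIPS64R2 byte-swap sequence, not kernel code.
 * model_dsbh()/model_dshd() mimic what dsbh and dshd do to a register.
 */
#include <stdint.h>
#include <stdio.h>

/* dsbh: swap the two bytes inside each 16-bit halfword */
static uint64_t model_dsbh(uint64_t x)
{
	return ((x & 0x00ff00ff00ff00ffULL) << 8) |
	       ((x & 0xff00ff00ff00ff00ULL) >> 8);
}

/* dshd: reverse the order of the four 16-bit halfwords */
static uint64_t model_dshd(uint64_t x)
{
	return (x << 48) |
	       ((x & 0x00000000ffff0000ULL) << 16) |
	       ((x >> 16) & 0x00000000ffff0000ULL) |
	       (x >> 48);
}

int main(void)
{
	uint64_t x = 0x0123456789abcdefULL;
	uint64_t fixed = model_dshd(model_dsbh(x));     /* dsbh; dshd          */
	uint64_t old = (fixed >> 32) | (fixed << 32);   /* ... then drotr 32   */

	printf("dsbh+dshd          : %016llx\n", (unsigned long long)fixed);
	printf("dsbh+dshd+drotr 32 : %016llx\n", (unsigned long long)old);
	return 0;
}

The first line prints the true byte-reversed value, 0xefcdab8967452301; the
second shows what the old three-instruction sequence produced, with the two
32-bit words byte-swapped individually but left in their original order.
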
From 5289f46b9de04bde181d833d48df9671b69c4b08 Mon Sep 17 00:00:00 2001
From: Kyle McMartin
Date: Tue, 23 Dec 2008 08:44:30 -0500
Subject: parisc: disable UP-optimized flush_tlb_mm

flush_tlb_mm's "optimized" uniprocessor case of allocating a new context
for userspace is exposing a race where we can suddenly return to a syscall
with the protection id and space id out of sync, trapping on the next
userspace access.

Debugged-by: James Bottomley
Tested-by: Helge Deller
Signed-off-by: Kyle McMartin
Signed-off-by: Linus Torvalds
---
 arch/parisc/include/asm/tlbflush.h | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
index b72ec66db69..1f6fd4fc05b 100644
--- a/arch/parisc/include/asm/tlbflush.h
+++ b/arch/parisc/include/asm/tlbflush.h
@@ -44,9 +44,12 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 {
 	BUG_ON(mm == &init_mm); /* Should never happen */

-#ifdef CONFIG_SMP
+#if 1 || defined(CONFIG_SMP)
 	flush_tlb_all();
 #else
+	/* FIXME: currently broken, causing space id and protection ids
+	 * to go out of sync, resulting in faults on userspace accesses.
+	 */
 	if (mm) {
 		if (mm->context != 0)
 			free_sid(mm->context);
--
cgit v1.2.3

From c1c15b65ec30275575dac9322aae607075769fbc Mon Sep 17 00:00:00 2001
From: "H. Peter Anvin"
Date: Tue, 23 Dec 2008 10:10:40 -0800
Subject: x86: PAT: fix address types in track_pfn_vma_new()

Impact: cleanup, fix warning

This warning:

 arch/x86/mm/pat.c: In function track_pfn_vma_copy:
 arch/x86/mm/pat.c:701: warning: passing argument 5 of follow_phys from incompatible pointer type

triggers because physical addresses are resource_size_t, not u64. This
really matters when calling an interface like follow_phys() which takes a
pointer to a physical address -- although on x86, being little-endian, it
would generally work anyway as long as the memory region wasn't completely
uninitialized.

Signed-off-by: H. Peter Anvin
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pat.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 541bcc944a5..85cbd3cd372 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -684,7 +684,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 {
 	int retval = 0;
 	unsigned long i, j;
-	u64 paddr;
+	resource_size_t paddr;
 	unsigned long prot;
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
@@ -746,8 +746,8 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
 {
 	int retval = 0;
 	unsigned long i, j;
-	u64 base_paddr;
-	u64 paddr;
+	resource_size_t base_paddr;
+	resource_size_t paddr;
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
 	unsigned long vma_size = vma_end - vma_start;
@@ -757,12 +757,12 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,

 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
-		paddr = (u64)vma->vm_pgoff << PAGE_SHIFT;
+		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
 		return reserve_pfn_range(paddr, vma_size, prot);
 	}

 	/* reserve page by page using pfn and size */
-	base_paddr = (u64)pfn << PAGE_SHIFT;
+	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		paddr = base_paddr + i;
 		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
@@ -790,7 +790,7 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 			unsigned long size)
 {
 	unsigned long i;
-	u64 paddr;
+	resource_size_t paddr;
 	unsigned long prot;
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
@@ -801,14 +801,14 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,

 	if (is_linear_pfn_mapping(vma)) {
 		/* free the whole chunk starting from vm_pgoff */
-		paddr = (u64)vma->vm_pgoff << PAGE_SHIFT;
+		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
 		free_pfn_range(paddr, vma_size);
 		return;
 	}

 	if (size != 0 && size != vma_size) {
 		/* free page by page, using pfn and size */
-		paddr = (u64)pfn << PAGE_SHIFT;
+		paddr = (resource_size_t)pfn << PAGE_SHIFT;
 		for (i = 0; i < size; i += PAGE_SIZE) {
 			paddr = paddr + i;
 			free_pfn_range(paddr, PAGE_SIZE);
--
cgit v1.2.3
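
In the kernel, resource_size_t is the physical-address type (phys_addr_t),
which is only 32 bits wide on a 32-bit x86 build without 64-bit physical
addressing, so a u64 * and a resource_size_t * are genuinely different
pointer types there. When the callee stores a 32-bit physical address
through that pointer, a little-endian caller that zero-initialized its u64
happens to get the right value, which is the "would generally work anyway"
caveat above; an uninitialized u64, or a big-endian layout, would not. The
stand-alone sketch below shows the pattern; lookup_phys() and its
one-argument signature are invented for illustration and are not the real
follow_phys() interface.

#include <stdint.h>
#include <stdio.h>

/* Pretend physical addresses are 32-bit, as on a 32-bit build without PAE. */
typedef uint32_t resource_size_t;

/* Callee fills in a physical address through the pointer, follow_phys()-style. */
static void lookup_phys(resource_size_t *phys)
{
	*phys = 0x12345678u;		/* stores only 32 bits */
}

int main(void)
{
	uint64_t paddr = 0;		/* caller wrongly declares the address as u64 */

	/*
	 * Without the cast this call is exactly the "incompatible pointer
	 * type" warning from the commit message; the cast silences the
	 * warning, but the store still touches only the low 32 bits of
	 * paddr, reproducing the bug the patch removes.
	 */
	lookup_phys((resource_size_t *)&paddr);

	printf("paddr = 0x%llx\n", (unsigned long long)paddr);
	return 0;
}
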
From 40f15ad8aadff5ebb621b17a6f303ad2cd3f847d Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Wed, 24 Dec 2008 10:49:51 +0100
Subject: x86: disable X86_PTRACE_BTS

there's a new ptrace arch level feature in .28:

  config X86_PTRACE_BTS
	bool "Branch Trace Store"

it has broken fork() handling: the old DS area gets copied over into a new
task without clearing it.

Fixes exist but they came too late:

  c5dee61: x86, bts: memory accounting
  bf53de9: x86, bts: add fork and exit handling

and are queued up for v2.6.29.

This shows that the facility is still not tested well enough to release
into a stable kernel - disable it for now and reactivate in .29. In .29
the hardware-branch-tracer will use the DS/BTS facilities too - hopefully
resulting in better code.

Signed-off-by: Ingo Molnar
---
 arch/x86/Kconfig.cpu | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch')

diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index b815664fe37..8e99073b9e0 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -520,6 +520,7 @@ config X86_PTRACE_BTS
 	bool "Branch Trace Store"
 	default y
 	depends on X86_DEBUGCTLMSR
+	depends on BROKEN
 	help
 	  This adds a ptrace interface to the hardware's branch trace store.
--
cgit v1.2.3

From 1fcccb008be12ea823aaa392758e1e41fb82de9a Mon Sep 17 00:00:00 2001
From: Jaswinder Singh
Date: Tue, 23 Dec 2008 21:50:11 +0530
Subject: x86: traps.c replace #if CONFIG_X86_32 with #ifdef CONFIG_X86_32

Impact: cleanup, avoid warning on X86_64

Fixes this warning on X86_64:

  CC      arch/x86/kernel/traps.o
 arch/x86/kernel/traps.c:695:5: warning: "CONFIG_X86_32" is not defined

Signed-off-by: Jaswinder Singh
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/traps.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index c320c29255c..f37cee75ab5 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -691,7 +691,7 @@ void math_error(void __user *ip)

 	err = swd & ~cwd & 0x3f;

-#if CONFIG_X86_32
+#ifdef CONFIG_X86_32
 	if (!err)
 		return;
 #endif
--
cgit v1.2.3
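
This patch and the MIPS elf.h patch at the top of this series fix the same
class of problem: testing a macro's value with #if or #elif when the macro
is simply never defined in some configurations. The kernel's common
compiler flags include -Wundef, so GCC warns about every such test of an
undefined macro, while #ifdef and defined() only ask whether the macro
exists and stay quiet. A minimal stand-alone illustration follows;
FEATURE_FOO is a made-up macro, not a real kernel config symbol. Build it
with "gcc -Wundef demo.c" to see the warning.

#include <stdio.h>

#if FEATURE_FOO			/* -Wundef: "FEATURE_FOO" is not defined */
#define IF_RESULT "taken"
#else
#define IF_RESULT "not taken"
#endif

#ifdef FEATURE_FOO		/* no warning: only asks whether it is defined */
#define IFDEF_RESULT "taken"
#else
#define IFDEF_RESULT "not taken"
#endif

int main(void)
{
	printf("#if FEATURE_FOO    -> %s\n", IF_RESULT);
	printf("#ifdef FEATURE_FOO -> %s\n", IFDEF_RESULT);
	return 0;
}

Both tests end up not taken, since an undefined macro evaluates to 0 inside
#if; the only difference is the warning, which on X86_64 turned every use
of #if CONFIG_X86_32 into build noise.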