Diffstat (limited to 'arch')
 arch/mips/include/asm/byteorder.h  |  5 ++---
 arch/mips/include/asm/elf.h        |  2 +-
 arch/parisc/include/asm/tlbflush.h |  5 ++++-
 arch/x86/Kconfig.cpu               |  1 +
 arch/x86/kernel/traps.c            |  2 +-
 arch/x86/mm/pat.c                  | 16 ++++++++--------
 6 files changed, 17 insertions(+), 14 deletions(-)
diff --git a/arch/mips/include/asm/byteorder.h b/arch/mips/include/asm/byteorder.h
index 2988d29a086..33790b9e0cc 100644
--- a/arch/mips/include/asm/byteorder.h
+++ b/arch/mips/include/asm/byteorder.h
@@ -50,9 +50,8 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
{
__asm__(
- " dsbh %0, %1 \n"
- " dshd %0, %0 \n"
- " drotr %0, %0, 32 \n"
+ " dsbh %0, %1\n"
+ " dshd %0, %0"
: "=r" (x)
: "r" (x));
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index a8eac1697b3..d58f128aa74 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -232,7 +232,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
*/
#ifdef __MIPSEB__
#define ELF_DATA ELFDATA2MSB
-#elif __MIPSEL__
+#elif defined(__MIPSEL__)
#define ELF_DATA ELFDATA2LSB
#endif
#define ELF_ARCH EM_MIPS
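The elf.h change swaps an implicit truth test for an explicit definedness test: the preprocessor evaluates an undefined identifier in #elif as 0, so the old form worked, but it draws -Wundef warnings and suggests __MIPSEL__ carries a value when it is merely defined or absent. The new form also mirrors the #ifdef __MIPSEB__ branch above it. A minimal illustration, using a hypothetical undefined macro NOT_SET:

#if NOT_SET                  /* accepted, but -Wundef warns: NOT_SET is undefined */
#endif
#if defined(NOT_SET)         /* explicit definedness test, no warning either way */
#endif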
diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
index b72ec66db69..1f6fd4fc05b 100644
--- a/arch/parisc/include/asm/tlbflush.h
+++ b/arch/parisc/include/asm/tlbflush.h
@@ -44,9 +44,12 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
{
BUG_ON(mm == &init_mm); /* Should never happen */
-#ifdef CONFIG_SMP
+#if 1 || defined(CONFIG_SMP)
flush_tlb_all();
#else
+ /* FIXME: currently broken, causing space id and protection ids
+ * to go out of sync, resulting in faults on userspace accesses.
+ */
if (mm) {
if (mm->context != 0)
free_sid(mm->context);
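Rather than deleting the broken branch outright, the parisc hunk steers every build down the former SMP-only path: #if 1 || defined(CONFIG_SMP) is always true, so flush_tlb_mm() now falls back to the heavyweight flush_tlb_all() on uniprocessor kernels too, while the dead #else branch plus the new FIXME comment keep the intended per-mm space-id recycling visible for whoever repairs it.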
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index b815664fe37..8e99073b9e0 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -520,6 +520,7 @@ config X86_PTRACE_BTS
bool "Branch Trace Store"
default y
depends on X86_DEBUGCTLMSR
+ depends on BROKEN
help
This adds a ptrace interface to the hardware's branch trace store.
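Since the BROKEN symbol has no prompt and is never set in a normal configuration, adding "depends on BROKEN" hides X86_PTRACE_BTS from menuconfig entirely; this is the conventional way to park a misbehaving feature without removing the code.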
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 73ece2633f4..141907ab6e2 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -687,7 +687,7 @@ void math_error(void __user *ip)
err = swd & ~cwd & 0x3f;
-#if CONFIG_X86_32
+#ifdef CONFIG_X86_32
if (!err)
return;
#endif
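This is the same preprocessor pitfall as the elf.h hunk above: Kconfig bool symbols are either defined to 1 or not defined at all, never defined to 0, so #ifdef is the canonical test. The old #if CONFIG_X86_32 happened to behave identically on 64-bit builds (an undefined token evaluates as 0) but warned under -Wundef.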
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 541bcc944a5..85cbd3cd372 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -684,7 +684,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
{
int retval = 0;
unsigned long i, j;
- u64 paddr;
+ resource_size_t paddr;
unsigned long prot;
unsigned long vma_start = vma->vm_start;
unsigned long vma_end = vma->vm_end;
@@ -746,8 +746,8 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
{
int retval = 0;
unsigned long i, j;
- u64 base_paddr;
- u64 paddr;
+ resource_size_t base_paddr;
+ resource_size_t paddr;
unsigned long vma_start = vma->vm_start;
unsigned long vma_end = vma->vm_end;
unsigned long vma_size = vma_end - vma_start;
@@ -757,12 +757,12 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
if (is_linear_pfn_mapping(vma)) {
/* reserve the whole chunk starting from vm_pgoff */
- paddr = (u64)vma->vm_pgoff << PAGE_SHIFT;
+ paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
return reserve_pfn_range(paddr, vma_size, prot);
}
/* reserve page by page using pfn and size */
- base_paddr = (u64)pfn << PAGE_SHIFT;
+ base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
for (i = 0; i < size; i += PAGE_SIZE) {
paddr = base_paddr + i;
retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
@@ -790,7 +790,7 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
unsigned long size)
{
unsigned long i;
- u64 paddr;
+ resource_size_t paddr;
unsigned long prot;
unsigned long vma_start = vma->vm_start;
unsigned long vma_end = vma->vm_end;
@@ -801,14 +801,14 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
if (is_linear_pfn_mapping(vma)) {
/* free the whole chunk starting from vm_pgoff */
- paddr = (u64)vma->vm_pgoff << PAGE_SHIFT;
+ paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
free_pfn_range(paddr, vma_size);
return;
}
if (size != 0 && size != vma_size) {
/* free page by page, using pfn and size */
- paddr = (u64)pfn << PAGE_SHIFT;
+ paddr = (resource_size_t)pfn << PAGE_SHIFT;
for (i = 0; i < size; i += PAGE_SIZE) {
paddr = paddr + i;
free_pfn_range(paddr, PAGE_SIZE);
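All of the pat.c hunks make one substitution: physical addresses go from a hard-coded u64 to resource_size_t (the kernel's typedef for physical address and resource widths), so paddr is 64-bit only where the platform can actually address more than 32 bits of physical memory, e.g. with PAE, and stays 32-bit otherwise. The cast before the shift remains essential because vm_pgoff and pfn are unsigned long; a standalone sketch of why the order matters on a 32-bit kernel (illustration only, with hypothetical values, not code from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pgoff = 0x00200000;	/* page offset of an 8 GiB address */

	/* Wrong: the shift happens in 32 bits, high bits already lost. */
	uint64_t truncated = (uint64_t)(pgoff << 12);

	/* Right: widen first, then shift, the pattern the patch keeps
	 * as (resource_size_t)vma->vm_pgoff << PAGE_SHIFT. */
	uint64_t widened = (uint64_t)pgoff << 12;

	printf("0x%llx vs 0x%llx\n",
	       (unsigned long long)truncated,
	       (unsigned long long)widened);	/* 0x0 vs 0x200000000 */
	return 0;
}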