Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/tlb.h | 4
-rw-r--r--  arch/arm/include/asm/tlb.h | 4
-rw-r--r--  arch/avr32/include/asm/pgalloc.h | 2
-rw-r--r--  arch/cris/include/asm/pgalloc.h | 2
-rw-r--r--  arch/frv/include/asm/pgalloc.h | 4
-rw-r--r--  arch/frv/include/asm/pgtable.h | 2
-rw-r--r--  arch/ia64/include/asm/pgalloc.h | 6
-rw-r--r--  arch/ia64/include/asm/tlb.h | 12
-rw-r--r--  arch/m32r/include/asm/pgalloc.h | 4
-rw-r--r--  arch/m68k/include/asm/motorola_pgalloc.h | 6
-rw-r--r--  arch/m68k/include/asm/sun3_pgalloc.h | 4
-rw-r--r--  arch/microblaze/Makefile | 35
-rw-r--r--  arch/microblaze/include/asm/io.h | 1
-rw-r--r--  arch/microblaze/include/asm/pgalloc.h | 4
-rw-r--r--  arch/microblaze/include/asm/pgtable.h | 6
-rw-r--r--  arch/microblaze/include/asm/prom.h | 23
-rw-r--r--  arch/microblaze/include/asm/tlb.h | 2
-rw-r--r--  arch/microblaze/include/asm/uaccess.h | 2
-rw-r--r--  arch/microblaze/kernel/Makefile | 2
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c | 2
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo-static.c | 2
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo.c | 2
-rw-r--r--  arch/microblaze/kernel/head.S | 17
-rw-r--r--  arch/microblaze/kernel/hw_exception_handler.S | 109
-rw-r--r--  arch/microblaze/kernel/module.c | 19
-rw-r--r--  arch/microblaze/kernel/setup.c | 8
-rw-r--r--  arch/microblaze/kernel/sys_microblaze.c | 99
-rw-r--r--  arch/microblaze/kernel/syscall_table.S | 2
-rw-r--r--  arch/microblaze/mm/fault.c | 15
-rw-r--r--  arch/mips/include/asm/pgalloc.h | 15
-rw-r--r--  arch/mn10300/include/asm/pgalloc.h | 2
-rw-r--r--  arch/parisc/include/asm/tlb.h | 4
-rw-r--r--  arch/powerpc/include/asm/pgalloc-32.h | 2
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h | 4
-rw-r--r--  arch/powerpc/include/asm/pgalloc.h | 6
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 4
-rw-r--r--  arch/s390/include/asm/tlb.h | 9
-rw-r--r--  arch/s390/kernel/early.c | 4
-rw-r--r--  arch/s390/kernel/smp.c | 7
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S | 11
-rw-r--r--  arch/s390/power/swsusp.c | 36
-rw-r--r--  arch/s390/power/swsusp_asm64.S | 35
-rw-r--r--  arch/sh/include/asm/pgalloc.h | 10
-rw-r--r--  arch/sh/include/asm/tlb.h | 6
-rw-r--r--  arch/sparc/include/asm/pgalloc_32.h | 8
-rw-r--r--  arch/sparc/include/asm/tlb_64.h | 6
-rw-r--r--  arch/um/include/asm/pgalloc.h | 4
-rw-r--r--  arch/um/include/asm/tlb.h | 6
-rw-r--r--  arch/x86/include/asm/pgalloc.h | 25
-rw-r--r--  arch/x86/include/asm/uaccess.h | 4
-rw-r--r--  arch/x86/include/asm/uaccess_64.h | 10
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 6
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c | 253
-rw-r--r--  arch/x86/kernel/irqinit.c | 2
-rw-r--r--  arch/x86/kernel/mfgpt_32.c | 2
-rw-r--r--  arch/x86/kernel/reboot.c | 8
-rw-r--r--  arch/x86/kernel/setup.c | 13
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S | 7
-rw-r--r--  arch/x86/mm/pgtable.c | 6
-rw-r--r--  arch/x86/mm/srat_64.c | 6
-rw-r--r--  arch/xtensa/include/asm/tlb.h | 2
 62 files changed, 533 insertions(+), 392 deletions(-)
diff --git a/arch/alpha/include/asm/tlb.h b/arch/alpha/include/asm/tlb.h
index c13636575fb..42866759f3f 100644
--- a/arch/alpha/include/asm/tlb.h
+++ b/arch/alpha/include/asm/tlb.h
@@ -9,7 +9,7 @@
#include <asm-generic/tlb.h>
-#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
-#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
+#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
+#define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
#endif
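
The hunks below all make the same interface change: the page-table freeing hooks (__pte_free_tlb, __pmd_free_tlb, __pud_free_tlb) gain a third argument carrying the virtual address of the range being torn down, which each architecture may use or ignore. A simplified sketch of the generic caller that supplies it (the function name here is illustrative and error paths are omitted):

/*
 * Generic mm side of the change: the unmap address is forwarded into the
 * architecture hook via pte_free_tlb(), which wraps __pte_free_tlb().
 */
static void free_pte_range_sketch(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);	/* addr now reaches __pte_free_tlb() */
	tlb->mm->nr_ptes--;
}
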
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 321c83e43a1..f41a6f57cd1 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -102,8 +102,8 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
}
#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep)
-#define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp)
+#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
+#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
#define tlb_migrate_finish(mm) do { } while (0)
diff --git a/arch/avr32/include/asm/pgalloc.h b/arch/avr32/include/asm/pgalloc.h
index 64082132394..92ecd8446ef 100644
--- a/arch/avr32/include/asm/pgalloc.h
+++ b/arch/avr32/include/asm/pgalloc.h
@@ -83,7 +83,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
quicklist_free_page(QUICK_PT, NULL, pte);
}
-#define __pte_free_tlb(tlb,pte) \
+#define __pte_free_tlb(tlb,pte,addr) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
diff --git a/arch/cris/include/asm/pgalloc.h b/arch/cris/include/asm/pgalloc.h
index a1ba761d057..6da975db112 100644
--- a/arch/cris/include/asm/pgalloc.h
+++ b/arch/cris/include/asm/pgalloc.h
@@ -47,7 +47,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
__free_page(pte);
}
-#define __pte_free_tlb(tlb,pte) \
+#define __pte_free_tlb(tlb,pte,address) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
diff --git a/arch/frv/include/asm/pgalloc.h b/arch/frv/include/asm/pgalloc.h
index 971e6addb00..416d19a632f 100644
--- a/arch/frv/include/asm/pgalloc.h
+++ b/arch/frv/include/asm/pgalloc.h
@@ -49,7 +49,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
__free_page(pte);
}
-#define __pte_free_tlb(tlb,pte) \
+#define __pte_free_tlb(tlb,pte,address) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb),(pte)); \
@@ -62,7 +62,7 @@ do { \
*/
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *) 2); })
#define pmd_free(mm, x) do { } while (0)
-#define __pmd_free_tlb(tlb,x) do { } while (0)
+#define __pmd_free_tlb(tlb,x,a) do { } while (0)
#endif /* CONFIG_MMU */
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
index 33233011b1c..22c60692b55 100644
--- a/arch/frv/include/asm/pgtable.h
+++ b/arch/frv/include/asm/pgtable.h
@@ -225,7 +225,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
*/
#define pud_alloc_one(mm, address) NULL
#define pud_free(mm, x) do { } while (0)
-#define __pud_free_tlb(tlb, x) do { } while (0)
+#define __pud_free_tlb(tlb, x, address) do { } while (0)
/*
* The "pud_xxx()" functions here are trivial for a folded two-level
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
index b9ac1a6fc21..96a8d927db2 100644
--- a/arch/ia64/include/asm/pgalloc.h
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -48,7 +48,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
quicklist_free(0, NULL, pud);
}
-#define __pud_free_tlb(tlb, pud) pud_free((tlb)->mm, pud)
+#define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud)
#endif /* CONFIG_PGTABLE_4 */
static inline void
@@ -67,7 +67,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
quicklist_free(0, NULL, pmd);
}
-#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
+#define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
@@ -117,6 +117,6 @@ static inline void check_pgt_cache(void)
quicklist_trim(0, NULL, 25, 16);
}
-#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
+#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
#endif /* _ASM_IA64_PGALLOC_H */
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 20d8a39680c..85d965cb19a 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -236,22 +236,22 @@ do { \
__tlb_remove_tlb_entry(tlb, ptep, addr); \
} while (0)
-#define pte_free_tlb(tlb, ptep) \
+#define pte_free_tlb(tlb, ptep, address) \
do { \
tlb->need_flush = 1; \
- __pte_free_tlb(tlb, ptep); \
+ __pte_free_tlb(tlb, ptep, address); \
} while (0)
-#define pmd_free_tlb(tlb, ptep) \
+#define pmd_free_tlb(tlb, ptep, address) \
do { \
tlb->need_flush = 1; \
- __pmd_free_tlb(tlb, ptep); \
+ __pmd_free_tlb(tlb, ptep, address); \
} while (0)
-#define pud_free_tlb(tlb, pudp) \
+#define pud_free_tlb(tlb, pudp, address) \
do { \
tlb->need_flush = 1; \
- __pud_free_tlb(tlb, pudp); \
+ __pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif /* _ASM_IA64_TLB_H */
diff --git a/arch/m32r/include/asm/pgalloc.h b/arch/m32r/include/asm/pgalloc.h
index f11a2b909cd..0fc73619897 100644
--- a/arch/m32r/include/asm/pgalloc.h
+++ b/arch/m32r/include/asm/pgalloc.h
@@ -58,7 +58,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
__free_page(pte);
}
-#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
@@ -68,7 +68,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x) do { } while (0)
-#define __pmd_free_tlb(tlb, x) do { } while (0)
+#define __pmd_free_tlb(tlb, x, addr) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
#define check_pgt_cache() do { } while (0)
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index d08bf6261df..15ee4c74a9f 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -54,7 +54,8 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t page)
__free_page(page);
}
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page)
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
+ unsigned long address)
{
pgtable_page_dtor(page);
cache_page(kmap(page));
@@ -73,7 +74,8 @@ static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd)
return free_pointer_table(pmd);
}
-static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+ unsigned long address)
{
return free_pointer_table(pmd);
}
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index d4c83f14381..48d80d5a666 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -32,7 +32,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t page)
__free_page(page);
}
-#define __pte_free_tlb(tlb,pte) \
+#define __pte_free_tlb(tlb,pte,addr) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
@@ -80,7 +80,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page
* inside the pgd, so has no extra memory associated with it.
*/
#define pmd_free(mm, x) do { } while (0)
-#define __pmd_free_tlb(tlb, x) do { } while (0)
+#define __pmd_free_tlb(tlb, x, addr) do { } while (0)
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index d0bcf80a113..8439598d465 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -6,14 +6,16 @@ endif
# What CPU version are we building for, and crack it open
# as major.minor.rev
-CPU_VER=$(subst ",,$(CONFIG_XILINX_MICROBLAZE0_HW_VER) )
-CPU_MAJOR=$(shell echo $(CPU_VER) | cut -d '.' -f 1)
-CPU_MINOR=$(shell echo $(CPU_VER) | cut -d '.' -f 2)
-CPU_REV=$(shell echo $(CPU_VER) | cut -d '.' -f 3)
+CPU_VER := $(shell echo $(CONFIG_XILINX_MICROBLAZE0_HW_VER))
+CPU_MAJOR := $(shell echo $(CPU_VER) | cut -d '.' -f 1)
+CPU_MINOR := $(shell echo $(CPU_VER) | cut -d '.' -f 2)
+CPU_REV := $(shell echo $(CPU_VER) | cut -d '.' -f 3)
export CPU_VER CPU_MAJOR CPU_MINOR CPU_REV
# Use cpu-related CONFIG_ vars to set compile options.
+# The various CONFIG_XILINX cpu features options are integers 0/1/2...
+# rather than bools y/n
# Work out HW multiplier support. This is icky.
# 1. Spartan2 has no HW multipliers.
@@ -34,30 +36,29 @@ CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR) += -mxl-pattern-compare
CPUFLAGS-1 += $(call cc-option,-mcpu=v$(CPU_VER))
-# The various CONFIG_XILINX cpu features options are integers 0/1/2...
-# rather than bools y/n
-
# r31 holds current when in kernel mode
-CFLAGS_KERNEL += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2)
+KBUILD_KERNEL += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2)
LDFLAGS :=
LDFLAGS_vmlinux :=
-LDFLAGS_BLOB := --format binary --oformat elf32-microblaze
-LIBGCC := $(shell $(CC) $(CFLAGS_KERNEL) -print-libgcc-file-name)
+LIBGCC := $(shell $(CC) $(KBUILD_KERNEL) -print-libgcc-file-name)
-head-y := arch/microblaze/kernel/head.o
-libs-y += arch/microblaze/lib/ $(LIBGCC)
-core-y += arch/microblaze/kernel/ arch/microblaze/mm/ \
- arch/microblaze/platform/
+head-y := arch/microblaze/kernel/head.o
+libs-y += arch/microblaze/lib/
+libs-y += $(LIBGCC)
+core-y += arch/microblaze/kernel/
+core-y += arch/microblaze/mm/
+core-y += arch/microblaze/platform/
-boot := arch/$(ARCH)/boot
+boot := arch/microblaze/boot
# defines filename extension depending memory management type
ifeq ($(CONFIG_MMU),)
-MMUEXT := -nommu
+MMU := -nommu
endif
-export MMUEXT
+
+export MMU
all: linux.bin
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index 5c173424d07..7c3ec13b44d 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -14,7 +14,6 @@
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/types.h>
-#include <asm/byteorder.h>
#include <linux/mm.h> /* Get struct page {...} */
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 59a757e46ba..b0131da1387 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -180,7 +180,7 @@ extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
__free_page(ptepage);
}
-#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
#define pmd_populate(mm, pmd, pte) (pmd_val(*(pmd)) = page_address(pte))
@@ -193,7 +193,7 @@ extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
*/
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
/*#define pmd_free(mm, x) do { } while (0)*/
-#define __pmd_free_tlb(tlb, x) do { } while (0)
+#define __pmd_free_tlb(tlb, x, addr) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
extern int do_check_pgt_cache(int, int);
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 4c57a586a98..cc3a4dfc3ea 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -185,6 +185,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/* Definitions for MicroBlaze. */
#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
+#define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */
#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
@@ -320,8 +321,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-/* FIXME */
-static inline int pte_file(pte_t pte) { return 0; }
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -488,7 +488,7 @@ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS 29
#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) })
+#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
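
The microblaze change above wires up nonlinear file mappings: bit 0 (_PAGE_FILE) is set in not-present PTEs created by pgoff_to_pte(), so pte_file() can recognise them and pte_to_pgoff() can recover the offset stored above the low three bits. A small userspace model of the encoding, using a plain integer in place of the kernel's pte_t (everything here is illustrative, not part of the patch):

#include <assert.h>

#define _PAGE_FILE	0x001u
#define _PAGE_PRESENT	0x002u

static unsigned int pgoff_to_pte_val(unsigned int off) { return (off << 3) | _PAGE_FILE; }
static unsigned int pte_val_to_pgoff(unsigned int val) { return val >> 3; }
static int pte_is_file(unsigned int val) { return val & _PAGE_FILE; }

int main(void)
{
	unsigned int off = 0x12345;		/* some 29-bit file offset */
	unsigned int val = pgoff_to_pte_val(off);

	assert(!(val & _PAGE_PRESENT));		/* stays a not-present PTE */
	assert(pte_is_file(val));		/* now recognised by pte_file() */
	assert(pte_val_to_pgoff(val) == off);	/* offset round-trips */
	return 0;
}
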
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index 20f7b3a926e..37e6f305a68 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -16,6 +16,18 @@
#define _ASM_MICROBLAZE_PROM_H
#ifdef __KERNEL__
+/* Definitions used by the flattened device tree */
+#define OF_DT_HEADER 0xd00dfeed /* marker */
+#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
+#define OF_DT_END_NODE 0x2 /* End node */
+#define OF_DT_PROP 0x3 /* Property: name off, size, content */
+#define OF_DT_NOP 0x4 /* nop */
+#define OF_DT_END 0x9
+
+#define OF_DT_VERSION 0x10
+
+#ifndef __ASSEMBLY__
+
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/platform_device.h>
@@ -29,16 +41,6 @@
#define of_prop_cmp(s1, s2) strcmp((s1), (s2))
#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
-/* Definitions used by the flattened device tree */
-#define OF_DT_HEADER 0xd00dfeed /* marker */
-#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
-#define OF_DT_END_NODE 0x2 /* End node */
-#define OF_DT_PROP 0x3 /* Property: name off, size, content */
-#define OF_DT_NOP 0x4 /* nop */
-#define OF_DT_END 0x9
-
-#define OF_DT_VERSION 0x10
-
/*
* This is what gets passed to the kernel by prom_init or kexec
*
@@ -309,5 +311,6 @@ extern void __iomem *of_iomap(struct device_node *device, int index);
*/
#include <linux/of.h>
+#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_PROM_H */
diff --git a/arch/microblaze/include/asm/tlb.h b/arch/microblaze/include/asm/tlb.h
index c472d280113..e8abd4a0349 100644
--- a/arch/microblaze/include/asm/tlb.h
+++ b/arch/microblaze/include/asm/tlb.h
@@ -11,7 +11,7 @@
#ifndef _ASM_MICROBLAZE_TLB_H
#define _ASM_MICROBLAZE_TLB_H
-#define tlb_flush(tlb) do {} while (0)
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h>
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 65adad61e7e..5431b4631a7 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -189,7 +189,7 @@ extern long strnlen_user(const char *src, long count);
#define __put_user(x, ptr) \
({ \
- __typeof__(*(ptr)) __gu_val = x; \
+ __typeof__(*(ptr)) volatile __gu_val = (x); \
long __gu_err = 0; \
switch (sizeof(__gu_val)) { \
case 1: \
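
The __put_user() tweak above keeps the pattern of copying (x) into a temporary typed like *(ptr), so the argument is evaluated exactly once and converted to the destination width before the size switch. A userspace model of that pattern (a sketch of the idea only, not the kernel macro; the printed result assumes the usual two's-complement targets):

#include <stdio.h>

#define put_user_model(x, ptr) ({					\
	__typeof__(*(ptr)) __tmp = (x);	/* one evaluation, one conversion */ \
	*(ptr) = __tmp;							\
	0;								\
})

int main(void)
{
	short dst;

	put_user_model(0x12345678, &dst);	/* converted to short up front */
	printf("0x%x\n", (unsigned short)dst);	/* prints 0x5678 */
	return 0;
}
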
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index f4a5e19a20e..d487729683d 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -17,4 +17,4 @@ obj-$(CONFIG_HEART_BEAT) += heartbeat.o
obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
obj-$(CONFIG_MMU) += misc.o
-obj-y += entry$(MMUEXT).o
+obj-y += entry$(MMU).o
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
index 153f57c57b6..c259786e7fa 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
@@ -22,7 +22,7 @@
#define CI(c, p) { ci->c = PVR_##p(pvr); }
#define err_printk(x) \
- early_printk("ERROR: Microblaze " x " - different for PVR and DTS\n");
+ early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n");
void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
{
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
index 450ca6bb828..adb448f93d5 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-static.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
@@ -18,7 +18,7 @@ static const char family_string[] = CONFIG_XILINX_MICROBLAZE0_FAMILY;
static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER;
#define err_printk(x) \
- early_printk("ERROR: Microblaze " x "- different for kernel and DTS\n");
+ early_printk("ERROR: Microblaze " x "-different for kernel and DTS\n");
void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
{
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index a10bea119b9..c411c6757de 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -26,6 +26,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
{"7.10.b", 0x09},
{"7.10.c", 0x0a},
{"7.10.d", 0x0b},
+ {"7.20.a", 0x0c},
+ {"7.20.b", 0x0d},
/* FIXME There is no keycode defined in MBV for these versions */
{"2.10.a", 0x10},
{"3.00.a", 0x20},
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index e568d6ec621..e41c6ce2a7b 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -31,6 +31,7 @@
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
+#include <asm/prom.h> /* for OF_DT_HEADER */
#ifdef CONFIG_MMU
#include <asm/setup.h> /* COMMAND_LINE_SIZE */
@@ -54,11 +55,19 @@ ENTRY(_start)
andi r1, r1, ~2
mts rmsr, r1
-/* save fdt to kernel location */
-/* r7 stores pointer to fdt blob */
- beqi r7, no_fdt_arg
+/* r7 may point to an FDT, or there may be one linked in.
+ if it's in r7, we've got to save it away ASAP.
+ We ensure r7 points to a valid FDT, just in case the bootloader
+ is broken or non-existent */
+ beqi r7, no_fdt_arg /* NULL pointer? don't copy */
+ lw r11, r0, r7 /* Does r7 point to a */
+ rsubi r11, r11, OF_DT_HEADER /* valid FDT? */
+ beqi r11, _prepare_copy_fdt
+ or r7, r0, r0 /* clear R7 when not valid DTB */
+ bnei r11, no_fdt_arg /* No - get out of here */
+_prepare_copy_fdt:
	or	r11, r0, r0 /* increment */
- ori r4, r0, TOPHYS(_fdt_start) /* save bram context */
+ ori r4, r0, TOPHYS(_fdt_start)
ori r3, r0, (0x4000 - 4)
_copy_fdt:
lw r12, r7, r11 /* r12 = r7 + r11 */
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 9d591cd74fc..3288c973767 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -74,6 +74,7 @@
#include <asm/mmu.h>
#include <asm/pgtable.h>
+#include <asm/signal.h>
#include <asm/asm-offsets.h>
/* Helpful Macros */
@@ -428,19 +429,9 @@ handle_unaligned_ex:
mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
nop
_no_delayslot:
-#endif
-
-#ifdef CONFIG_MMU
- /* Check if unaligned address is last on a 4k page */
- andi r5, r4, 0xffc
- xori r5, r5, 0xffc
- bnei r5, _unaligned_ex2
- _unaligned_ex1:
- RESTORE_STATE;
-/* Another page must be accessed or physical address not in page table */
- bri unaligned_data_trap
-
- _unaligned_ex2:
+ /* jump to high level unaligned handler */
+ RESTORE_STATE;
+ bri unaligned_data_trap
#endif
andi r6, r3, 0x3E0; /* Mask and extract the register operand */
srl r6, r6; /* r6 >> 5 */
@@ -450,45 +441,6 @@ _no_delayslot:
srl r6, r6;
/* Store the register operand in a temporary location */
sbi r6, r0, TOPHYS(ex_reg_op);
-#ifdef CONFIG_MMU
- /* Get physical address */
- /* If we are faulting a kernel address, we have to use the
- * kernel page tables.
- */
- ori r5, r0, CONFIG_KERNEL_START
- cmpu r5, r4, r5
- bgti r5, _unaligned_ex3
- ori r5, r0, swapper_pg_dir
- bri _unaligned_ex4
-
- /* Get the PGD for the current thread. */
-_unaligned_ex3: /* user thread */
- addi r5 ,CURRENT_TASK, TOPHYS(0); /* get current task address */
- lwi r5, r5, TASK_THREAD + PGDIR
-_unaligned_ex4:
- tophys(r5,r5)
- BSRLI(r6,r4,20) /* Create L1 (pgdir/pmd) address */
- andi r6, r6, 0xffc
-/* Assume pgdir aligned on 4K boundary, no need for "andi r5,r5,0xfffff003" */
- or r5, r5, r6
- lwi r6, r5, 0 /* Get L1 entry */
- andi r5, r6, 0xfffff000 /* Extract L2 (pte) base address. */
- beqi r5, _unaligned_ex1 /* Bail if no table */
-
- tophys(r5,r5)
- BSRLI(r6,r4,10) /* Compute PTE address */
- andi r6, r6, 0xffc
- andi r5, r5, 0xfffff003
- or r5, r5, r6
- lwi r5, r5, 0 /* Get Linux PTE */
-
- andi r6, r5, _PAGE_PRESENT
- beqi r6, _unaligned_ex1 /* Bail if no page */
-
- andi r5, r5, 0xfffff000 /* Extract RPN */
- andi r4, r4, 0x00000fff /* Extract offset */
- or r4, r4, r5 /* Create physical address */
-#endif /* CONFIG_MMU */
andi r6, r3, 0x400; /* Extract ESR[S] */
bnei r6, ex_sw;
@@ -959,15 +911,15 @@ _unaligned_data_exception:
andi r6, r3, 0x800; /* Extract ESR[W] - delay slot */
ex_lw_vm:
beqid r6, ex_lhw_vm;
- lbui r5, r4, 0; /* Exception address in r4 - delay slot */
+load1: lbui r5, r4, 0; /* Exception address in r4 - delay slot */
/* Load a word, byte-by-byte from destination address and save it in tmp space*/
la r6, r0, ex_tmp_data_loc_0;
sbi r5, r6, 0;
- lbui r5, r4, 1;
+load2: lbui r5, r4, 1;
sbi r5, r6, 1;
- lbui r5, r4, 2;
+load3: lbui r5, r4, 2;
sbi r5, r6, 2;
- lbui r5, r4, 3;
+load4: lbui r5, r4, 3;
sbi r5, r6, 3;
brid ex_lw_tail_vm;
/* Get the destination register value into r3 - delay slot */
@@ -977,7 +929,7 @@ ex_lhw_vm:
* save it in tmp space */
la r6, r0, ex_tmp_data_loc_0;
sbi r5, r6, 0;
- lbui r5, r4, 1;
+load5: lbui r5, r4, 1;
sbi r5, r6, 1;
lhui r3, r6, 0; /* Get the destination register value into r3 */
ex_lw_tail_vm:
@@ -996,22 +948,53 @@ ex_sw_tail_vm:
swi r3, r5, 0; /* Get the word - delay slot */
/* Store the word, byte-by-byte into destination address */
lbui r3, r5, 0;
- sbi r3, r4, 0;
+store1: sbi r3, r4, 0;
lbui r3, r5, 1;
- sbi r3, r4, 1;
+store2: sbi r3, r4, 1;
lbui r3, r5, 2;
- sbi r3, r4, 2;
+store3: sbi r3, r4, 2;
lbui r3, r5, 3;
brid ret_from_exc;
- sbi r3, r4, 3; /* Delay slot */
+store4: sbi r3, r4, 3; /* Delay slot */
ex_shw_vm:
/* Store the lower half-word, byte-by-byte into destination address */
lbui r3, r5, 2;
- sbi r3, r4, 0;
+store5: sbi r3, r4, 0;
lbui r3, r5, 3;
brid ret_from_exc;
- sbi r3, r4, 1; /* Delay slot */
+store6: sbi r3, r4, 1; /* Delay slot */
ex_sw_end_vm: /* Exception handling of store word, ends. */
+
+/* We have to handle the case where a get/put_user macro is handed an
+ * unaligned pointer into a bad page area. We must find the original
+ * instruction that caused the fault and apply the fixup for that
+ * instruction, not for an instruction inside the unaligned handler. */
+ex_unaligned_fixup:
+ ori r5, r7, 0 /* setup pointer to pt_regs */
+ lwi r6, r7, PT_PC; /* faulting address is one instruction above */
+ addik r6, r6, -4 /* for finding proper fixup */
+ swi r6, r7, PT_PC; /* and save it back to PT_PC */
+ addik r7, r0, SIGSEGV
+ /* call bad_page_fault to find the proper fixup; the fixup address is saved
+ * in PT_PC, which is used as the return address from the exception */
+ la r15, r0, ret_from_exc-8 /* setup return address */
+ brid bad_page_fault
+ nop
+
+/* Cover every load/store above, since any attempt to access the address may fault */
+.section __ex_table,"a";
+ .word load1,ex_unaligned_fixup;
+ .word load2,ex_unaligned_fixup;
+ .word load3,ex_unaligned_fixup;
+ .word load4,ex_unaligned_fixup;
+ .word load5,ex_unaligned_fixup;
+ .word store1,ex_unaligned_fixup;
+ .word store2,ex_unaligned_fixup;
+ .word store3,ex_unaligned_fixup;
+ .word store4,ex_unaligned_fixup;
+ .word store5,ex_unaligned_fixup;
+ .word store6,ex_unaligned_fixup;
+.previous;
.end _unaligned_data_exception
#endif /* CONFIG_MMU */
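
The new __ex_table entries send any fault on the byte loads/stores above to ex_unaligned_fixup, which rewinds PT_PC by one instruction so that the exception-table search matches the original get/put_user access rather than the unaligned handler. A rough C sketch of the lookup that bad_page_fault() then performs (function and field names follow the usual kernel helpers; treat them as illustrative here):

#include <linux/module.h>	/* declares search_exception_tables() */

void bad_page_fault_sketch(struct pt_regs *regs, unsigned long address, int sig)
{
	const struct exception_table_entry *fixup;

	/* PT_PC was rewound by 4, so this matches the original access */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;	/* resume at the fixup stub */
		return;
	}
	/* no fixup entry: fall through to the architecture's oops path */
}
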
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
index 51414171326..5a45b1adfef 100644
--- a/arch/microblaze/kernel/module.c
+++ b/arch/microblaze/kernel/module.c
@@ -57,7 +57,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
unsigned long int *location;
- unsigned long int locoffs;
unsigned long int value;
#if __GNUC__ < 4
unsigned long int old_value;
@@ -113,10 +112,12 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
break;
case R_MICROBLAZE_64_PCREL:
- locoffs = (location[0] & 0xFFFF) << 16 |
+#if __GNUC__ < 4
+ old_value = (location[0] & 0xFFFF) << 16 |
(location[1] & 0xFFFF);
- value -= (unsigned long int)(location) + 4 +
- locoffs;
+ value -= old_value;
+#endif
+ value -= (unsigned long int)(location) + 4;
location[0] = (location[0] & 0xFFFF0000) |
(value >> 16);
location[1] = (location[1] & 0xFFFF0000) |
@@ -125,6 +126,14 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
value);
break;
+ case R_MICROBLAZE_32_PCREL_LO:
+ pr_debug("R_MICROBLAZE_32_PCREL_LO\n");
+ break;
+
+ case R_MICROBLAZE_64_NONE:
+ pr_debug("R_MICROBLAZE_NONE\n");
+ break;
+
case R_MICROBLAZE_NONE:
pr_debug("R_MICROBLAZE_NONE\n");
break;
@@ -133,7 +142,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
printk(KERN_ERR "module %s: "
"Unknown relocation: %u\n",
module->name,
- ELF32_R_TYPE(rela->r_info));
+ ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
}
}
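
R_MICROBLAZE_64_PCREL patches a PC-relative displacement into the two 16-bit immediates of an imm/addik pair; the fixed code computes the displacement only from the symbol value and the location, without folding in the old immediate (except in the GCC < 4 case). A userspace model of the patch-up arithmetic (the instruction words below are placeholder encodings, not taken from a real module):

#include <stdint.h>
#include <stdio.h>

static void patch_pcrel(uint32_t insn[2], uint32_t sym, uint32_t loc)
{
	uint32_t value = sym - (loc + 4);		/* PC-relative displacement */

	insn[0] = (insn[0] & 0xFFFF0000u) | (value >> 16);	/* high half */
	insn[1] = (insn[1] & 0xFFFF0000u) | (value & 0xFFFFu);	/* low half */
}

int main(void)
{
	uint32_t insn[2] = { 0xB0000000u, 0x30600000u };	/* imm 0 ; addik-style */

	patch_pcrel(insn, 0x90001000u, 0x90000100u);
	printf("%08x %08x\n", (unsigned)insn[0], (unsigned)insn[1]);	/* low halves: 0000, 0efc */
	return 0;
}
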
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 8709bea0960..2a97bf513b6 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -138,8 +138,12 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
setup_early_printk(NULL);
#endif
- early_printk("Ramdisk addr 0x%08x, FDT 0x%08x\n", ram, fdt);
- printk(KERN_NOTICE "Found FDT at 0x%08x\n", fdt);
+ early_printk("Ramdisk addr 0x%08x, ", ram);
+ if (fdt)
+ early_printk("FDT at 0x%08x\n", fdt);
+ else
+ early_printk("Compiled-in FDT at 0x%08x\n",
+ (unsigned int)_fdt_start);
#ifdef CONFIG_MTD_UCLINUX
early_printk("Found romfs @ 0x%08x (0x%08x)\n",
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index e000bce09b2..b96f1682bb2 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -33,105 +33,6 @@
#include <linux/unistd.h>
#include <asm/syscalls.h>
-/*
- * sys_ipc() is the de-multiplexer for the SysV IPC calls..
- *
- * This is really horribly ugly. This will be remove with new toolchain.
- */
-asmlinkage long
-sys_ipc(uint call, int first, int second, int third, void *ptr, long fifth)
-{
- int version, ret;
-
- version = call >> 16; /* hack for backward compatibility */
- call &= 0xffff;
-
- ret = -EINVAL;
- switch (call) {
- case SEMOP:
- ret = sys_semop(first, (struct sembuf *)ptr, second);
- break;
- case SEMGET:
- ret = sys_semget(first, second, third);
- break;
- case SEMCTL:
- {
- union semun fourth;
-
- if (!ptr)
- break;
- ret = (access_ok(VERIFY_READ, ptr, sizeof(long)) ? 0 : -EFAULT)
- || (get_user(fourth.__pad, (void **)ptr)) ;
- if (ret)
- break;
- ret = sys_semctl(first, second, third, fourth);
- break;
- }
- case MSGSND:
- ret = sys_msgsnd(first, (struct msgbuf *) ptr, second, third);
- break;
- case MSGRCV:
- switch (version) {
- case 0: {
- struct ipc_kludge tmp;
-
- if (!ptr)
- break;
- ret = (access_ok(VERIFY_READ, ptr, sizeof(tmp))
- ? 0 : -EFAULT) || copy_from_user(&tmp,
- (struct ipc_kludge *) ptr, sizeof(tmp));
- if (ret)
- break;
- ret = sys_msgrcv(first, tmp.msgp, second, tmp.msgtyp,
- third);
- break;
- }
- default:
- ret = sys_msgrcv(first, (struct msgbuf *) ptr,
- second, fifth, third);
- break;
- }
- break;
- case MSGGET:
- ret = sys_msgget((key_t) first, second);
- break;
- case MSGCTL:
- ret = sys_msgctl(first, second, (struct msqid_ds *) ptr);
- break;
- case SHMAT:
- switch (version) {
- default: {
- ulong raddr;
- ret = access_ok(VERIFY_WRITE, (ulong *) third,
- sizeof(ulong)) ? 0 : -EFAULT;
- if (ret)
- break;
- ret = do_shmat(first, (char *) ptr, second, &raddr);
- if (ret)
- break;
- ret = put_user(raddr, (ulong *) third);
- break;
- }
- case 1: /* iBCS2 emulator entry point */
- if (!segment_eq(get_fs(), get_ds()))
- break;
- ret = do_shmat(first, (char *) ptr, second,
- (ulong *) third);
- break;
- }
- break;
- case SHMDT:
- ret = sys_shmdt((char *)ptr);
- break;
- case SHMGET:
- ret = sys_shmget(first, second, third);
- break;
- case SHMCTL:
- ret = sys_shmctl(first, second, (struct shmid_ds *) ptr);
- break;
- }
- return ret;
-}
asmlinkage long microblaze_vfork(struct pt_regs *regs)
{
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 31b32a6c5f4..216db817beb 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -121,7 +121,7 @@ ENTRY(sys_call_table)
.long sys_wait4
.long sys_swapoff /* 115 */
.long sys_sysinfo
- .long sys_ipc
+ .long sys_ni_syscall /* old sys_ipc */
.long sys_fsync
.long sys_ni_syscall /* sys_sigreturn_wrapper */
.long sys_clone /* 120 */
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 956607a63f4..d9d249a66ff 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -69,7 +69,7 @@ static int store_updates_sp(struct pt_regs *regs)
* It is called from do_page_fault above and from some of the procedures
* in traps.c.
*/
-static void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
+void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
const struct exception_table_entry *fixup;
/* MS: no context */
@@ -122,15 +122,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
}
#endif /* CONFIG_KGDB */
- if (in_atomic() || mm == NULL) {
- /* FIXME */
- if (kernel_mode(regs)) {
- printk(KERN_EMERG
- "Page fault in kernel mode - Oooou!!! pid %d\n",
- current->pid);
- _exception(SIGSEGV, regs, code, address);
- return;
- }
+ if (in_atomic() || !mm) {
+ if (kernel_mode(regs))
+ goto bad_area_nosemaphore;
+
/* in_atomic() in user mode is really bad,
as is current->mm == NULL. */
printk(KERN_EMERG "Page fault in user mode with "
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 1275831dda2..3738f4b48cb 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -98,23 +98,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
__free_pages(pte, PTE_ORDER);
}
-#define __pte_free_tlb(tlb,pte) \
+#define __pte_free_tlb(tlb,pte,address) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
} while (0)
-#ifdef CONFIG_32BIT
-
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- */
-#define pmd_free(mm, x) do { } while (0)
-#define __pmd_free_tlb(tlb, x) do { } while (0)
-
-#endif
-
#ifdef CONFIG_64BIT
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
@@ -132,7 +121,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
free_pages((unsigned long)pmd, PMD_ORDER);
}
-#define __pmd_free_tlb(tlb, x) pmd_free((tlb)->mm, x)
+#define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x)
#endif
diff --git a/arch/mn10300/include/asm/pgalloc.h b/arch/mn10300/include/asm/pgalloc.h
index ec057e1bd4c..a19f11327cd 100644
--- a/arch/mn10300/include/asm/pgalloc.h
+++ b/arch/mn10300/include/asm/pgalloc.h
@@ -51,6 +51,6 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
}
-#define __pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte))
+#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte))
#endif /* _ASM_PGALLOC_H */
diff --git a/arch/parisc/include/asm/tlb.h b/arch/parisc/include/asm/tlb.h
index 383b1db310e..07924903989 100644
--- a/arch/parisc/include/asm/tlb.h
+++ b/arch/parisc/include/asm/tlb.h
@@ -21,7 +21,7 @@ do { if (!(tlb)->fullmm) \
#include <asm-generic/tlb.h>
-#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
-#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
+#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
#endif
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 0815eb40aca..c9500d666a1 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -16,7 +16,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
*/
/* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */
#define pmd_free(mm, x) do { } while (0)
-#define __pmd_free_tlb(tlb,x) do { } while (0)
+#define __pmd_free_tlb(tlb,x,a) do { } while (0)
/* #define pgd_populate(mm, pmd, pte) BUG() */
#ifndef CONFIG_BOOKE
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index afda2bdd860..e6f069c4f71 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -118,11 +118,11 @@ static inline void pgtable_free(pgtable_free_t pgf)
kmem_cache_free(pgtable_cache[cachenum], p);
}
-#define __pmd_free_tlb(tlb, pmd) \
+#define __pmd_free_tlb(tlb, pmd,addr) \
pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
#ifndef CONFIG_PPC_64K_PAGES
-#define __pud_free_tlb(tlb, pud) \
+#define __pud_free_tlb(tlb, pud, addr) \
pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
#endif /* CONFIG_PPC_64K_PAGES */
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index 5d8480265a7..1730e5e298d 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -38,14 +38,14 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
#ifdef CONFIG_SMP
-#define __pte_free_tlb(tlb,ptepage) \
+#define __pte_free_tlb(tlb,ptepage,address) \
do { \
pgtable_page_dtor(ptepage); \
pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
- PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
+ PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
} while (0)
#else
-#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
+#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, (pte))
#endif
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 9920d6a7cf2..c46ef2ffa3d 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -305,7 +305,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pmd = pmd_offset(pud, start);
pud_clear(pud);
- pmd_free_tlb(tlb, pmd);
+ pmd_free_tlb(tlb, pmd, start);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -348,7 +348,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
pud = pud_offset(pgd, start);
pgd_clear(pgd);
- pud_free_tlb(tlb, pud);
+ pud_free_tlb(tlb, pud, start);
}
/*
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 3d8a96d39d9..81150b05368 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -96,7 +96,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
* pte_free_tlb frees a pte table and clears the CRSTE for the
* page table from the tlb.
*/
-static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
+static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+ unsigned long address)
{
if (!tlb->fullmm) {
tlb->array[tlb->nr_ptes++] = pte;
@@ -113,7 +114,8 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
* as the pgd. pmd_free_tlb checks the asce_limit against 2GB
* to avoid the double free of the pmd in this case.
*/
-static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+ unsigned long address)
{
#ifdef __s390x__
if (tlb->mm->context.asce_limit <= (1UL << 31))
@@ -134,7 +136,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
* as the pgd. pud_free_tlb checks the asce_limit against 4TB
* to avoid the double free of the pud in this case.
*/
-static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
+static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+ unsigned long address)
{
#ifdef __s390x__
if (tlb->mm->context.asce_limit <= (1UL << 42))
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index f9b144049dc..8d15314381e 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -210,7 +210,7 @@ static noinline __init void detect_machine_type(void)
machine_flags |= MACHINE_FLAG_VM;
}
-static void early_pgm_check_handler(void)
+static __init void early_pgm_check_handler(void)
{
unsigned long addr;
const struct exception_table_entry *fixup;
@@ -222,7 +222,7 @@ static void early_pgm_check_handler(void)
S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
}
-void setup_lowcore_early(void)
+static noinline __init void setup_lowcore_early(void)
{
psw_t psw;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2270730f535..be2cae08340 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -687,13 +687,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE)
lowcore->extended_save_area_addr = (u32) save_area;
-#else
- if (vdso_alloc_per_cpu(smp_processor_id(), lowcore))
- BUG();
#endif
set_prefix((u32)(unsigned long) lowcore);
local_mcck_enable();
local_irq_enable();
+#ifdef CONFIG_64BIT
+ if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
+ BUG();
+#endif
for_each_possible_cpu(cpu)
if (cpu != smp_processor_id())
smp_create_idle(cpu);
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 79dbfee831e..49106c6e6f8 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -88,10 +88,17 @@ __kernel_clock_gettime:
llilh %r4,0x0100
sar %a4,%r4
lghi %r4,0
+ epsw %r5,0
sacf 512 /* Magic ectg instruction */
.insn ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4
- sacf 0
- sar %a4,%r2
+ tml %r5,0x4000
+ jo 11f
+ tml %r5,0x8000
+ jno 10f
+ sacf 256
+ j 11f
+10: sacf 0
+11: sar %a4,%r2
algr %r1,%r0 /* r1 = cputime as TOD value */
mghi %r1,1000 /* convert to nanoseconds */
srlg %r1,%r1,12 /* r1 = cputime in nanosec */
diff --git a/arch/s390/power/swsusp.c b/arch/s390/power/swsusp.c
index e6a4fe9f5f2..bd1f5c6b0b8 100644
--- a/arch/s390/power/swsusp.c
+++ b/arch/s390/power/swsusp.c
@@ -7,24 +7,36 @@
*
*/
+#include <asm/system.h>
-/*
- * save CPU registers before creating a hibernation image and before
- * restoring the memory state from it
- */
void save_processor_state(void)
{
- /* implentation contained in the
- * swsusp_arch_suspend function
+ /* swsusp_arch_suspend() actually saves all cpu register contents.
+ * Machine checks must be disabled since swsusp_arch_suspend() stores
+ * register contents to their lowcore save areas. That's the same
+ * place where register contents on machine checks would be saved.
+ * To avoid register corruption disable machine checks.
+ * We must also disable machine checks in the new psw mask for
+ * program checks, since swsusp_arch_suspend() may generate program
+ * checks. Disabling machine checks for all other new psw masks is
+ * just paranoia.
*/
+ local_mcck_disable();
+ /* Disable lowcore protection */
+ __ctl_clear_bit(0,28);
+ S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK;
+ S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK;
+ S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK;
+ S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK;
}
-/*
- * restore the contents of CPU registers
- */
void restore_processor_state(void)
{
- /* implentation contained in the
- * swsusp_arch_resume function
- */
+ S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK;
+ S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK;
+ S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK;
+ S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK;
+ /* Enable lowcore protection */
+ __ctl_set_bit(0,28);
+ local_mcck_enable();
}
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/power/swsusp_asm64.S
index 76d688da32f..b26df5c5933 100644
--- a/arch/s390/power/swsusp_asm64.S
+++ b/arch/s390/power/swsusp_asm64.S
@@ -32,19 +32,14 @@ swsusp_arch_suspend:
/* Deactivate DAT */
stnsm __SF_EMPTY(%r15),0xfb
- /* Switch off lowcore protection */
- stctg %c0,%c0,__SF_EMPTY(%r15)
- ni __SF_EMPTY+4(%r15),0xef
- lctlg %c0,%c0,__SF_EMPTY(%r15)
-
/* Store prefix register on stack */
stpx __SF_EMPTY(%r15)
- /* Setup base register for lowcore (absolute 0) */
- llgf %r1,__SF_EMPTY(%r15)
+ /* Save prefix register contents for lowcore */
+ llgf %r4,__SF_EMPTY(%r15)
/* Get pointer to save area */
- aghi %r1,0x1000
+ lghi %r1,0x1000
/* Store registers */
mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */
@@ -79,17 +74,15 @@ swsusp_arch_suspend:
xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
spx __SF_EMPTY(%r15)
- /* Setup lowcore */
- brasl %r14,setup_lowcore_early
+ lghi %r2,0
+ lghi %r3,2*PAGE_SIZE
+ lghi %r5,2*PAGE_SIZE
+1: mvcle %r2,%r4,0
+ jo 1b
/* Save image */
brasl %r14,swsusp_save
- /* Switch on lowcore protection */
- stctg %c0,%c0,__SF_EMPTY(%r15)
- oi __SF_EMPTY+4(%r15),0x10
- lctlg %c0,%c0,__SF_EMPTY(%r15)
-
/* Restore prefix register and return */
lghi %r1,0x1000
spx 0x318(%r1)
@@ -117,11 +110,6 @@ swsusp_arch_resume:
/* Deactivate DAT */
stnsm __SF_EMPTY(%r15),0xfb
- /* Switch off lowcore protection */
- stctg %c0,%c0,__SF_EMPTY(%r15)
- ni __SF_EMPTY+4(%r15),0xef
- lctlg %c0,%c0,__SF_EMPTY(%r15)
-
/* Set prefix page to zero */
xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
spx __SF_EMPTY(%r15)
@@ -175,7 +163,7 @@ swsusp_arch_resume:
/* Load old stack */
lg %r15,0x2f8(%r13)
- /* Pointer to save arae */
+ /* Pointer to save area */
lghi %r13,0x1000
#ifdef CONFIG_SMP
@@ -187,11 +175,6 @@ swsusp_arch_resume:
/* Restore prefix register */
spx 0x318(%r13)
- /* Switch on lowcore protection */
- stctg %c0,%c0,__SF_EMPTY(%r15)
- oi __SF_EMPTY+4(%r15),0x10
- lctlg %c0,%c0,__SF_EMPTY(%r15)
-
/* Activate DAT */
stosm __SF_EMPTY(%r15),0x04
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 84dd2db7104..63ca37bd9a9 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -73,20 +73,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
quicklist_free_page(QUICK_PT, NULL, pte);
}
-#define __pte_free_tlb(tlb,pte) \
+#define __pte_free_tlb(tlb,pte,addr) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb), (pte)); \
} while (0)
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- */
-
-#define pmd_free(mm, x) do { } while (0)
-#define __pmd_free_tlb(tlb,x) do { } while (0)
-
static inline void check_pgt_cache(void)
{
quicklist_trim(QUICK_PGD, NULL, 25, 16);
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 9c16f737074..da8fe7ab872 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -91,9 +91,9 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
}
#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep)
-#define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp)
-#define pud_free_tlb(tlb, pudp) pud_free((tlb)->mm, pudp)
+#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
+#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
+#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
#define tlb_migrate_finish(mm) do { } while (0)
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index 681582d2696..ca2b34456c4 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
+++ b/arch/sparc/include/asm/pgalloc_32.h
@@ -44,8 +44,8 @@ BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
#define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd)
-#define pmd_free(mm, pmd) free_pmd_fast(pmd)
-#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
+#define pmd_free(mm, pmd) free_pmd_fast(pmd)
+#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
#define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE)
@@ -62,7 +62,7 @@ BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
#define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte)
BTFIXUPDEF_CALL(void, pte_free, pgtable_t )
-#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte)
-#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
+#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte)
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
#endif /* _SPARC_PGALLOC_H */
diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h
index ee38e731bfa..dca406b9b6f 100644
--- a/arch/sparc/include/asm/tlb_64.h
+++ b/arch/sparc/include/asm/tlb_64.h
@@ -100,9 +100,9 @@ static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
}
#define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
-#define pte_free_tlb(mp, ptepage) pte_free((mp)->mm, ptepage)
-#define pmd_free_tlb(mp, pmdp) pmd_free((mp)->mm, pmdp)
-#define pud_free_tlb(tlb,pudp) __pud_free_tlb(tlb,pudp)
+#define pte_free_tlb(mp, ptepage, addr) pte_free((mp)->mm, ptepage)
+#define pmd_free_tlb(mp, pmdp, addr) pmd_free((mp)->mm, pmdp)
+#define pud_free_tlb(tlb,pudp, addr) __pud_free_tlb(tlb,pudp,addr)
#define tlb_migrate_finish(mm) do { } while (0)
#define tlb_start_vma(tlb, vma) do { } while (0)
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
index 718984359f8..32c8ce4e151 100644
--- a/arch/um/include/asm/pgalloc.h
+++ b/arch/um/include/asm/pgalloc.h
@@ -40,7 +40,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
__free_page(pte);
}
-#define __pte_free_tlb(tlb,pte) \
+#define __pte_free_tlb(tlb,pte, address) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb),(pte)); \
@@ -53,7 +53,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
free_page((unsigned long)pmd);
}
-#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
+#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
#endif
#define check_pgt_cache() do { } while (0)
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 5240fa1c5e0..660caedac9e 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -116,11 +116,11 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
-#define pte_free_tlb(tlb, ptep) __pte_free_tlb(tlb, ptep)
+#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
-#define pud_free_tlb(tlb, pudp) __pud_free_tlb(tlb, pudp)
+#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
-#define pmd_free_tlb(tlb, pmdp) __pmd_free_tlb(tlb, pmdp)
+#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
#define tlb_migrate_finish(mm) do {} while (0)
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index dd14c54ac71..0e8c2a0fd92 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -46,7 +46,13 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
__free_page(pte);
}
-extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
+extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
+ unsigned long address)
+{
+ ___pte_free_tlb(tlb, pte);
+}
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *pte)
@@ -78,7 +84,13 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
free_page((unsigned long)pmd);
}
-extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
+extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
+
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+ unsigned long address)
+{
+ ___pmd_free_tlb(tlb, pmd);
+}
#ifdef CONFIG_X86_PAE
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
@@ -108,7 +120,14 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
free_page((unsigned long)pud);
}
-extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
+extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
+
+static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+ unsigned long address)
+{
+ ___pud_free_tlb(tlb, pud);
+}
+
#endif /* PAGETABLE_LEVELS > 3 */
#endif /* PAGETABLE_LEVELS > 2 */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 20e6a795e16..d2c6c930b49 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -212,9 +212,9 @@ extern int __get_user_bad(void);
: "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
- __put_user_asm(x, ptr, retval, "q", "", "Zr", errret)
+ __put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr) \
- __put_user_asm_ex(x, addr, "q", "", "Zr")
+ __put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif
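
Switching the 64-bit __put_user constraints from "Zr" to "er" matters because an x86-64 store can only encode a sign-extended 32-bit immediate: GCC's "e" constraint describes exactly that class of constants, while "r" covers everything else via a register. A minimal sketch of the same constraint in isolation (no fault handling, unlike the real __put_user_asm):

static inline void store64_sketch(unsigned long long *dst, unsigned long long val)
{
	/* "er": a sign-extended 32-bit immediate when val is such a constant,
	 * otherwise a register operand */
	asm volatile("movq %1, %0" : "=m" (*dst) : "er" (val));
}
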
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 8cc687326eb..db24b215fc5 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -88,11 +88,11 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
ret, "l", "k", "ir", 4);
return ret;
case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
- ret, "q", "", "ir", 8);
+ ret, "q", "", "er", 8);
return ret;
case 10:
__put_user_asm(*(u64 *)src, (u64 __user *)dst,
- ret, "q", "", "ir", 10);
+ ret, "q", "", "er", 10);
if (unlikely(ret))
return ret;
asm("":::"memory");
@@ -101,12 +101,12 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
return ret;
case 16:
__put_user_asm(*(u64 *)src, (u64 __user *)dst,
- ret, "q", "", "ir", 16);
+ ret, "q", "", "er", 16);
if (unlikely(ret))
return ret;
asm("":::"memory");
__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
- ret, "q", "", "ir", 8);
+ ret, "q", "", "er", 8);
return ret;
default:
return copy_user_generic((__force void *)dst, src, size);
@@ -157,7 +157,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
ret, "q", "", "=r", 8);
if (likely(!ret))
__put_user_asm(tmp, (u64 __user *)dst,
- ret, "q", "", "ir", 8);
+ ret, "q", "", "er", 8);
return ret;
}
default:
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 28e5f595604..e2485b03f1c 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -356,7 +356,7 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
/* check CPU config space for extended APIC ID */
- if (c->x86 >= 0xf) {
+ if (cpu_has_apic && c->x86 >= 0xf) {
unsigned int val;
val = read_pci_config(0, 24, 0, 0x68);
if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 484c1e5f658..1cfb623ce11 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1692,17 +1692,15 @@ static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
const char *buf, size_t siz)
{
char *p;
- int len;
strncpy(mce_helper, buf, sizeof(mce_helper));
mce_helper[sizeof(mce_helper)-1] = 0;
- len = strlen(mce_helper);
p = strchr(mce_helper, '\n');
- if (*p)
+ if (p)
*p = 0;
- return len;
+ return strlen(mce_helper) + !!p;
}
static ssize_t set_ignore_ce(struct sys_device *s,
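
The set_trigger() fix stops dereferencing the result of strchr() when no newline is present and reports the number of bytes actually consumed. A userspace model of the corrected newline handling (the function here is only a stand-in for the sysfs store handler):

#include <stdio.h>
#include <string.h>

static size_t set_helper_model(char *dst, size_t dstlen, const char *buf)
{
	char *p;

	strncpy(dst, buf, dstlen);
	dst[dstlen - 1] = 0;
	p = strchr(dst, '\n');
	if (p)				/* the old "if (*p)" faulted when p was NULL */
		*p = 0;
	return strlen(dst) + !!p;	/* count the stripped newline as consumed */
}

int main(void)
{
	char helper[64];

	printf("%zu\n", set_helper_model(helper, sizeof(helper), "/sbin/mce-helper\n"));
	return 0;
}
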
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 36c3dc7b899..a7aa8f90095 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -66,6 +66,52 @@ static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
};
/*
+ * Not sure about some of these
+ */
+static const u64 p6_perfmon_event_map[] =
+{
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0000,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x0000,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
+ [PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
+};
+
+static u64 p6_pmu_event_map(int event)
+{
+ return p6_perfmon_event_map[event];
+}
+
+/*
+ * Counter setting that is specified not to count anything.
+ * We use this to effectively disable a counter.
+ *
+ * L2_RQSTS with 0 MESI unit mask.
+ */
+#define P6_NOP_COUNTER 0x0000002EULL
+
+static u64 p6_pmu_raw_event(u64 event)
+{
+#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
+#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
+#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
+#define P6_EVNTSEL_INV_MASK 0x00800000ULL
+#define P6_EVNTSEL_COUNTER_MASK 0xFF000000ULL
+
+#define P6_EVNTSEL_MASK \
+ (P6_EVNTSEL_EVENT_MASK | \
+ P6_EVNTSEL_UNIT_MASK | \
+ P6_EVNTSEL_EDGE_MASK | \
+ P6_EVNTSEL_INV_MASK | \
+ P6_EVNTSEL_COUNTER_MASK)
+
+ return event & P6_EVNTSEL_MASK;
+}
+
+
+/*
* Intel PerfMon v3. Used on Core2 and later.
*/
static const u64 intel_perfmon_event_map[] =
@@ -666,6 +712,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
{
struct perf_counter_attr *attr = &counter->attr;
struct hw_perf_counter *hwc = &counter->hw;
+ u64 config;
int err;
if (!x86_pmu_initialized())
@@ -718,14 +765,40 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
if (attr->config >= x86_pmu.max_events)
return -EINVAL;
+
/*
* The generic map:
*/
- hwc->config |= x86_pmu.event_map(attr->config);
+ config = x86_pmu.event_map(attr->config);
+
+ if (config == 0)
+ return -ENOENT;
+
+ if (config == -1LL)
+ return -EINVAL;
+
+ hwc->config |= config;
return 0;
}
+static void p6_pmu_disable_all(void)
+{
+ struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ u64 val;
+
+ if (!cpuc->enabled)
+ return;
+
+ cpuc->enabled = 0;
+ barrier();
+
+ /* p6 only has one enable register */
+ rdmsrl(MSR_P6_EVNTSEL0, val);
+ val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
static void intel_pmu_disable_all(void)
{
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
@@ -767,6 +840,23 @@ void hw_perf_disable(void)
return x86_pmu.disable_all();
}
+static void p6_pmu_enable_all(void)
+{
+ struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ unsigned long val;
+
+ if (cpuc->enabled)
+ return;
+
+ cpuc->enabled = 1;
+ barrier();
+
+ /* p6 only has one enable register */
+ rdmsrl(MSR_P6_EVNTSEL0, val);
+ val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+ wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
static void intel_pmu_enable_all(void)
{
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
@@ -784,13 +874,13 @@ static void amd_pmu_enable_all(void)
barrier();
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ struct perf_counter *counter = cpuc->counters[idx];
u64 val;
if (!test_bit(idx, cpuc->active_mask))
continue;
- rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
- if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
- continue;
+
+ val = counter->hw.config;
val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
}
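The rewritten amd_pmu_enable_all() loop above no longer read-modify-writes the live EVNTSEL MSR (and no longer skips counters whose enable bit happened to be set); it rebuilds each register from the software copy in counter->hw.config and ORs in the enable bit, so the hardware always ends up matching the saved configuration. A userspace sketch of that "write from the software state, never trust the MSR" pattern, with hypothetical names:

/*
 * Illustrative sketch: re-enable active counters from the saved software
 * config instead of read-modify-writing whatever is currently in the MSR.
 */
#include <stdio.h>
#include <stdint.h>

#define NUM_COUNTERS	4
#define EVENTSEL_ENABLE	(1ULL << 22)

struct counter_state {
	int	 active;
	uint64_t config;	/* software copy, as in counter->hw.config */
};

static uint64_t fake_eventsel_msr[NUM_COUNTERS];	/* stand-in for MSR_K7_EVNTSEL0.. */

static void enable_all(struct counter_state *c)
{
	for (int idx = 0; idx < NUM_COUNTERS; idx++) {
		if (!c[idx].active)
			continue;
		/* write config | ENABLE; the current MSR contents are irrelevant */
		fake_eventsel_msr[idx] = c[idx].config | EVENTSEL_ENABLE;
	}
}

int main(void)
{
	struct counter_state c[NUM_COUNTERS] = {
		[0] = { .active = 1, .config = 0xc0 },
		[2] = { .active = 1, .config = 0x76 },
	};

	enable_all(c);
	for (int i = 0; i < NUM_COUNTERS; i++)
		printf("evntsel%d = %#llx\n", i, (unsigned long long)fake_eventsel_msr[i]);
	return 0;
}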
@@ -819,16 +909,13 @@ static inline void intel_pmu_ack_status(u64 ack)
static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
- int err;
- err = checking_wrmsrl(hwc->config_base + idx,
+ (void)checking_wrmsrl(hwc->config_base + idx,
hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}
static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
- int err;
- err = checking_wrmsrl(hwc->config_base + idx,
- hwc->config);
+ (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
}
static inline void
@@ -836,13 +923,24 @@ intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
{
int idx = __idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, mask;
- int err;
mask = 0xfULL << (idx * 4);
rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
- err = checking_wrmsrl(hwc->config_base, ctrl_val);
+ (void)checking_wrmsrl(hwc->config_base, ctrl_val);
+}
+
+static inline void
+p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+ struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ u64 val = P6_NOP_COUNTER;
+
+ if (cpuc->enabled)
+ val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+ (void)checking_wrmsrl(hwc->config_base + idx, val);
}
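Because the P6 family has only a single global enable bit (bit 22 of EVNTSEL0 gates both counters), p6_pmu_disable_counter() above cannot clear a per-counter enable bit; it instead reprograms the counter with P6_NOP_COUNTER, an event that never counts (L2_RQSTS with an empty MESI unit mask), while preserving the shared enable bit. A hypothetical standalone helper sketching that selection logic:

/*
 * Illustrative sketch: on P6, "disabling" one counter means programming a
 * never-counting event into it, because the ENABLE bit is shared.
 */
#include <stdio.h>
#include <stdint.h>

#define P6_NOP_COUNTER			0x0000002EULL	/* L2_RQSTS, empty MESI mask */
#define ARCH_PERFMON_EVENTSEL0_ENABLE	(1ULL << 22)

/* Hypothetical helper: the EVNTSEL value to write for one counter. */
static uint64_t p6_evntsel_value(uint64_t config, int pmu_enabled, int disable)
{
	uint64_t val = disable ? P6_NOP_COUNTER : config;

	if (pmu_enabled)
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;	/* keep the shared enable bit */

	return val;
}

int main(void)
{
	/* Disabled while the PMU is globally enabled: prints 0x40002e. */
	printf("%#llx\n", (unsigned long long)p6_evntsel_value(0xc0, 1, 1));
	return 0;
}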
static inline void
@@ -943,6 +1041,19 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
err = checking_wrmsrl(hwc->config_base, ctrl_val);
}
+static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+{
+ struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ u64 val;
+
+ val = hwc->config;
+ if (cpuc->enabled)
+ val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+ (void)checking_wrmsrl(hwc->config_base + idx, val);
+}
+
+
static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
@@ -959,8 +1070,6 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
if (cpuc->enabled)
x86_pmu_enable_counter(hwc, idx);
- else
- x86_pmu_disable_counter(hwc, idx);
}
static int
@@ -1176,6 +1285,49 @@ static void intel_pmu_reset(void)
local_irq_restore(flags);
}
+static int p6_pmu_handle_irq(struct pt_regs *regs)
+{
+ struct perf_sample_data data;
+ struct cpu_hw_counters *cpuc;
+ struct perf_counter *counter;
+ struct hw_perf_counter *hwc;
+ int idx, handled = 0;
+ u64 val;
+
+ data.regs = regs;
+ data.addr = 0;
+
+ cpuc = &__get_cpu_var(cpu_hw_counters);
+
+ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+
+ counter = cpuc->counters[idx];
+ hwc = &counter->hw;
+
+ val = x86_perf_counter_update(counter, hwc, idx);
+ if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+ continue;
+
+ /*
+ * counter overflow
+ */
+ handled = 1;
+ data.period = counter->hw.last_period;
+
+ if (!x86_perf_counter_set_period(counter, hwc, idx))
+ continue;
+
+ if (perf_counter_overflow(counter, 1, &data))
+ p6_pmu_disable_counter(hwc, idx);
+ }
+
+ if (handled)
+ inc_irq_stat(apic_perf_irqs);
+
+ return handled;
+}
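Unlike intel_pmu_handle_irq(), p6_pmu_handle_irq() above has no global overflow status register to consult, so it re-reads each active counter and treats a counter whose top implemented bit has dropped to zero as overflowed: counters are armed at -period and count upward, and they only lose that top bit once they wrap, assuming the period fits in 31 bits as the max_period limit below guarantees. A small sketch of that test:

/*
 * Illustrative sketch: a counter armed at -period (mod 2^bits) keeps its
 * most significant implemented bit set until it wraps; a clear top bit is
 * what the IRQ handler treats as "overflowed".
 */
#include <stdio.h>
#include <stdint.h>

#define COUNTER_BITS	32
#define COUNTER_MASK	((1ULL << COUNTER_BITS) - 1)

static int counter_overflowed(uint64_t raw)
{
	return !(raw & (1ULL << (COUNTER_BITS - 1)));
}

int main(void)
{
	uint64_t period = 100000;
	uint64_t start = (uint64_t)(-(int64_t)period) & COUNTER_MASK;

	printf("at start:      %d\n", counter_overflowed(start));		/* 0 */
	printf("after +period: %d\n",
	       counter_overflowed((start + period) & COUNTER_MASK));		/* 1 */
	return 0;
}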
/*
* This handler is triggered by the local APIC, so the APIC IRQ handling
@@ -1185,14 +1337,13 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
struct cpu_hw_counters *cpuc;
- int bit, cpu, loops;
+ int bit, loops;
u64 ack, status;
data.regs = regs;
data.addr = 0;
- cpu = smp_processor_id();
- cpuc = &per_cpu(cpu_hw_counters, cpu);
+ cpuc = &__get_cpu_var(cpu_hw_counters);
perf_disable();
status = intel_pmu_get_status();
@@ -1249,14 +1400,13 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
struct cpu_hw_counters *cpuc;
struct perf_counter *counter;
struct hw_perf_counter *hwc;
- int cpu, idx, handled = 0;
+ int idx, handled = 0;
u64 val;
data.regs = regs;
data.addr = 0;
- cpu = smp_processor_id();
- cpuc = &per_cpu(cpu_hw_counters, cpu);
+ cpuc = &__get_cpu_var(cpu_hw_counters);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask))
@@ -1353,6 +1503,32 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
.priority = 1
};
+static struct x86_pmu p6_pmu = {
+ .name = "p6",
+ .handle_irq = p6_pmu_handle_irq,
+ .disable_all = p6_pmu_disable_all,
+ .enable_all = p6_pmu_enable_all,
+ .enable = p6_pmu_enable_counter,
+ .disable = p6_pmu_disable_counter,
+ .eventsel = MSR_P6_EVNTSEL0,
+ .perfctr = MSR_P6_PERFCTR0,
+ .event_map = p6_pmu_event_map,
+ .raw_event = p6_pmu_raw_event,
+ .max_events = ARRAY_SIZE(p6_perfmon_event_map),
+ .max_period = (1ULL << 31) - 1,
+ .version = 0,
+ .num_counters = 2,
+ /*
+ * Counters have 40 bits implemented. However, they are designed such
+ * that bits [32-39] are sign extensions of bit 31. As such, the
+ * effective width of a counter on a P6-like PMU is only 32 bits.
+ *
+ * See the IA-32 Intel Architecture Software Developer's Manual, Vol. 3B.
+ */
+ .counter_bits = 32,
+ .counter_mask = (1ULL << 32) - 1,
+};
+
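The comment in the p6_pmu definition above is why counter_bits is 32 rather than 40: with bits 32-39 mirroring bit 31, the counter cannot hold any 40-bit value whose upper byte disagrees with bit 31, so the usable range is exactly what 32 bits (and counter_mask = 2^32 - 1) can express. An illustrative model of that sign extension, not actual hardware access:

/*
 * Illustrative sketch: bits 32..39 of a P6 counter are copies of bit 31,
 * hence counter_bits = 32 and counter_mask = (1ULL << 32) - 1.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t p6_sign_extend_40(uint64_t v32)
{
	uint64_t bit31 = (v32 >> 31) & 1;

	return (v32 & 0xFFFFFFFFULL) | (bit31 ? 0xFFULL << 32 : 0);
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)p6_sign_extend_40(0x7FFFFFFF)); /* 0x7fffffff   */
	printf("%#llx\n", (unsigned long long)p6_sign_extend_40(0x80000000)); /* 0xff80000000 */
	return 0;
}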
static struct x86_pmu intel_pmu = {
.name = "Intel",
.handle_irq = intel_pmu_handle_irq,
@@ -1392,6 +1568,37 @@ static struct x86_pmu amd_pmu = {
.max_period = (1ULL << 47) - 1,
};
+static int p6_pmu_init(void)
+{
+ switch (boot_cpu_data.x86_model) {
+ case 1:
+ case 3: /* Pentium Pro */
+ case 5:
+ case 6: /* Pentium II */
+ case 7:
+ case 8:
+ case 11: /* Pentium III */
+ break;
+ case 9:
+ case 13:
+ /* Pentium M */
+ break;
+ default:
+ pr_cont("unsupported p6 CPU model %d ",
+ boot_cpu_data.x86_model);
+ return -ENODEV;
+ }
+
+ if (!cpu_has_apic) {
+ pr_info("no Local APIC, try rebooting with lapic");
+ return -ENODEV;
+ }
+
+ x86_pmu = p6_pmu;
+
+ return 0;
+}
+
static int intel_pmu_init(void)
{
union cpuid10_edx edx;
@@ -1400,8 +1607,14 @@ static int intel_pmu_init(void)
unsigned int ebx;
int version;
- if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+ if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+ /* check for P6 processor family */
+ if (boot_cpu_data.x86 == 6) {
+ return p6_pmu_init();
+ } else {
return -ENODEV;
+ }
+ }
/*
* Check whether the Architectural PerfMon supports
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 696f0e475c2..92b7703d3d5 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -187,7 +187,7 @@ static void __init apic_intr_init(void)
#ifdef CONFIG_X86_THERMAL_VECTOR
alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
-#ifdef CONFIG_X86_THRESHOLD
+#ifdef CONFIG_X86_MCE_THRESHOLD
alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#endif
#if defined(CONFIG_X86_NEW_MCE) && defined(CONFIG_X86_LOCAL_APIC)
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index 846510b78a0..2a62d843f01 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -347,7 +347,7 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
static struct irqaction mfgptirq = {
.handler = mfgpt_tick,
- .flags = IRQF_DISABLED | IRQF_NOBALANCING,
+ .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
.name = "mfgpt-timer"
};
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index d2d1ce8170f..508e982dd07 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -249,6 +249,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
},
},
+ { /* Handle problems with rebooting on CompuLab SBC-FITPC2 */
+ .callback = set_bios_reboot,
+ .ident = "CompuLab SBC-FITPC2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "CompuLab"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"),
+ },
+ },
{ }
};
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index de2cab13284..63f32d220ef 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -672,6 +672,19 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
},
},
+ {
+ /*
+ * An AMI BIOS with low memory corruption was found on the Intel DG45ID board.
+ * It has a different DMI_BIOS_VENDOR, "Intel Corp.", so for now we match only
+ * on DMI_BOARD_NAME and will see whether more bad products show up with this
+ * vendor.
+ */
+ .callback = dmi_low_memory_corruption,
+ .ident = "AMI BIOS",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "DG45ID"),
+ },
+ },
#endif
{}
};
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 367e8788204..59f31d2dd43 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -112,11 +112,6 @@ SECTIONS
_sdata = .;
DATA_DATA
CONSTRUCTORS
-
-#ifdef CONFIG_X86_64
- /* End of data section */
- _edata = .;
-#endif
} :data
#ifdef CONFIG_X86_32
@@ -156,10 +151,8 @@ SECTIONS
.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
*(.data.read_mostly)
-#ifdef CONFIG_X86_32
/* End of data section */
_edata = .;
-#endif
}
#ifdef CONFIG_X86_64
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 8e43bdd4545..af8f9650058 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -25,7 +25,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
return pte;
}
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
+void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
pgtable_page_dtor(pte);
paravirt_release_pte(page_to_pfn(pte));
@@ -33,14 +33,14 @@ void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
}
#if PAGETABLE_LEVELS > 2
-void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
tlb_remove_page(tlb, virt_to_page(pmd));
}
#if PAGETABLE_LEVELS > 3
-void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
+void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
tlb_remove_page(tlb, virt_to_page(pud));
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 2dfcbf9df2a..dbb5381f7b3 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -79,8 +79,10 @@ static __init void bad_srat(void)
acpi_numa = -1;
for (i = 0; i < MAX_LOCAL_APIC; i++)
apicid_to_node[i] = NUMA_NO_NODE;
- for (i = 0; i < MAX_NUMNODES; i++)
- nodes_add[i].start = nodes[i].end = 0;
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ nodes[i].start = nodes[i].end = 0;
+ nodes_add[i].start = nodes_add[i].end = 0;
+ }
remove_all_active_ranges();
}
diff --git a/arch/xtensa/include/asm/tlb.h b/arch/xtensa/include/asm/tlb.h
index 31c220faca0..0d766f9c108 100644
--- a/arch/xtensa/include/asm/tlb.h
+++ b/arch/xtensa/include/asm/tlb.h
@@ -42,6 +42,6 @@
#include <asm-generic/tlb.h>
-#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
+#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
#endif /* _XTENSA_TLB_H */