author    Ingo Molnar <mingo@elte.hu>    2009-10-06 15:02:30 +0200
committer Ingo Molnar <mingo@elte.hu>    2009-10-06 15:02:34 +0200
commit    d9b2002c406011164f245de7a81304625989f1c9 (patch)
tree      a2bb74773cd1409acbec5eb2fbba2ae9889d55e8 /arch/arm/mm
parent    c3b32fcbc7f4fd9a9b84718b991b175b0fd53f8c (diff)
parent    906010b2134e14a2e377decbadd357b3d0ab9c6a (diff)
Merge branch 'perf/urgent' into perf/core
Merge reason: Upcoming patch is dependent on a fix in perf/urgent.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig     5
-rw-r--r--  arch/arm/mm/context.c   2
-rw-r--r--  arch/arm/mm/fault.c     110
-rw-r--r--  arch/arm/mm/flush.c     10
-rw-r--r--  arch/arm/mm/init.c      12
5 files changed, 83 insertions(+), 56 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 5fe595aeba6..8d43e58f924 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -771,3 +771,8 @@ config CACHE_XSC3L2
select OUTER_CACHE
help
This option enables the L2 cache on XScale3.
+
+config ARM_L1_CACHE_SHIFT
+ int
+ default 6 if ARCH_OMAP3
+ default 5
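
The new ARM_L1_CACHE_SHIFT symbol gives OMAP3 (Cortex-A8, which has 64-byte
cache lines) a shift of 6 while keeping the historical 32-byte default for
everything else. A minimal sketch of how such a symbol is typically consumed,
assuming the usual arch/arm/include/asm/cache.h pattern (that file is not part
of this diff):

    /* assumed consumer of the new Kconfig symbol, not shown in this diff */
    #define L1_CACHE_SHIFT  CONFIG_ARM_L1_CACHE_SHIFT
    #define L1_CACHE_BYTES  (1 << L1_CACHE_SHIFT)   /* 64 on OMAP3, 32 elsewhere */
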
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index fc84fcc7438..6bda76a4319 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -59,6 +59,6 @@ void __new_context(struct mm_struct *mm)
}
spin_unlock(&cpu_asid_lock);
- mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
+ cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
mm->context.id = asid;
}
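
The context.c hunk is part of the tree-wide move away from touching
mm->cpu_vm_mask directly. A sketch of the accessor being adopted, assuming the
transitional definition from include/linux/mm_types.h of this era:

    /* assumed: the accessor wraps the embedded mask so callers no longer
     * depend on cpu_vm_mask being a plain cpumask_t field */
    #define mm_cpumask(mm) (&(mm)->cpu_vm_mask)

    /* old: struct assignment of a full cpumask_t value
     *   mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
     * new: copy through struct cpumask pointers
     *   cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
     * The pointer-based API is what later lets cpu_vm_mask grow beyond a
     * fixed-size bitmap without touching callers again. */

The cpumask_test_cpu() conversions in the flush.c hunks below follow the same
pattern, replacing cpu_isset() reads of the raw field.
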
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index cc8829d7e11..379f7855605 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -25,6 +25,19 @@
#include "fault.h"
+/*
+ * Fault status register encodings. We steal bit 31 for our own purposes.
+ */
+#define FSR_LNX_PF (1 << 31)
+#define FSR_WRITE (1 << 11)
+#define FSR_FS4 (1 << 10)
+#define FSR_FS3_0 (15)
+
+static inline int fsr_fs(unsigned int fsr)
+{
+ return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
+}
+
#ifdef CONFIG_MMU
#ifdef CONFIG_KPROBES
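
ARM splits the fault status across FS[3:0] (FSR bits 3:0) and FS[4] (FSR bit
10); fsr_fs() folds them into a 0..31 index into the fsr_info[] table,
replacing the open-coded expression removed from do_DataAbort() further down.
A stand-alone, runnable illustration (input values chosen for the example):

    #include <stdio.h>

    #define FSR_FS4   (1 << 10)
    #define FSR_FS3_0 (15)

    static inline int fsr_fs(unsigned int fsr)
    {
        /* shift FS[4] from bit 10 down to bit 4, keep FS[3:0] in place */
        return (fsr & FSR_FS3_0) | ((fsr & FSR_FS4) >> 6);
    }

    int main(void)
    {
        printf("%d\n", fsr_fs(0x005));  /* FS4=0, FS[3:0]=5 -> index 5  */
        printf("%d\n", fsr_fs(0x405));  /* FS4=1, FS[3:0]=5 -> index 21 */
        return 0;
    }
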
@@ -182,18 +195,35 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
#define VM_FAULT_BADMAP 0x010000
#define VM_FAULT_BADACCESS 0x020000
-static int
+/*
+ * Check that the permissions on the VMA allow for the fault which occurred.
+ * If we encountered a write fault, we must have write permission, otherwise
+ * we allow any permission.
+ */
+static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
+{
+ unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+
+ if (fsr & FSR_WRITE)
+ mask = VM_WRITE;
+ if (fsr & FSR_LNX_PF)
+ mask = VM_EXEC;
+
+ return vma->vm_flags & mask ? false : true;
+}
+
+static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
struct task_struct *tsk)
{
struct vm_area_struct *vma;
- int fault, mask;
+ int fault;
vma = find_vma(mm, addr);
fault = VM_FAULT_BADMAP;
- if (!vma)
+ if (unlikely(!vma))
goto out;
- if (vma->vm_start > addr)
+ if (unlikely(vma->vm_start > addr))
goto check_stack;
/*
@@ -201,47 +231,24 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
* memory access, so we can handle it.
*/
good_area:
- if (fsr & (1 << 11)) /* write? */
- mask = VM_WRITE;
- else
- mask = VM_READ|VM_EXEC|VM_WRITE;
-
- fault = VM_FAULT_BADACCESS;
- if (!(vma->vm_flags & mask))
+ if (access_error(fsr, vma)) {
+ fault = VM_FAULT_BADACCESS;
goto out;
+ }
/*
- * If for any reason at all we couldn't handle
- * the fault, make sure we exit gracefully rather
- * than endlessly redo the fault.
+ * If for any reason at all we couldn't handle the fault, make
+ * sure we exit gracefully rather than endlessly redo the fault.
*/
-survive:
- fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
- else if (fault & VM_FAULT_SIGBUS)
- return fault;
- BUG();
- }
+ fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
+ if (unlikely(fault & VM_FAULT_ERROR))
+ return fault;
if (fault & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
return fault;
-out_of_memory:
- if (!is_global_init(tsk))
- goto out;
-
- /*
- * If we are out of memory for pid1, sleep for a while and retry
- */
- up_read(&mm->mmap_sem);
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
-
check_stack:
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
goto good_area;
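
The access_error() helper introduced above centralizes the permission check
that used to be open-coded against bit 11: a write fault requires VM_WRITE, a
prefetch abort (tagged with the software FSR_LNX_PF bit) requires VM_EXEC, and
any other fault is satisfied by any of read/write/exec. A stand-alone,
runnable sketch of those rules (the VM_* values mirror <linux/mm.h>):

    #include <stdbool.h>
    #include <stdio.h>

    #define VM_READ    0x00000001
    #define VM_WRITE   0x00000002
    #define VM_EXEC    0x00000004
    #define FSR_WRITE  (1 << 11)
    #define FSR_LNX_PF (1u << 31)

    static bool access_error(unsigned int fsr, unsigned int vm_flags)
    {
        unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;

        if (fsr & FSR_WRITE)
            mask = VM_WRITE;    /* data write: need write permission */
        if (fsr & FSR_LNX_PF)
            mask = VM_EXEC;     /* prefetch abort: need exec permission */

        return (vm_flags & mask) ? false : true;
    }

    int main(void)
    {
        printf("%d\n", access_error(0,          VM_READ)); /* 0: read of r-- ok   */
        printf("%d\n", access_error(FSR_WRITE,  VM_READ)); /* 1: write to r-- bad */
        printf("%d\n", access_error(FSR_LNX_PF, VM_READ)); /* 1: exec of r-- bad  */
        return 0;
    }
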
@@ -278,6 +285,13 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
goto no_context;
down_read(&mm->mmap_sem);
+ } else {
+ /*
+ * The above down_read_trylock() might have succeeded in
+ * which case, we'll have missed the might_sleep() from
+ * down_read()
+ */
+ might_sleep();
}
fault = __do_page_fault(mm, addr, fsr, tsk);
@@ -289,6 +303,16 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
return 0;
+ if (fault & VM_FAULT_OOM) {
+ /*
+ * We ran out of memory, call the OOM killer, and return to
+ * userspace (which will retry the fault, or kill us if we
+ * got oom-killed)
+ */
+ pagefault_out_of_memory();
+ return 0;
+ }
+
/*
* If we are in kernel mode at this point, we
* have no context to handle this fault with.
@@ -296,16 +320,6 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (!user_mode(regs))
goto no_context;
- if (fault & VM_FAULT_OOM) {
- /*
- * We ran out of memory, or some other thing
- * happened to us that made us unable to handle
- * the page fault gracefully.
- */
- printk("VM: killing process %s\n", tsk->comm);
- do_group_exit(SIGKILL);
- return 0;
- }
if (fault & VM_FAULT_SIGBUS) {
/*
* We had some memory, but were unable to
@@ -489,10 +503,10 @@ hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *)
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
- const struct fsr_info *inf = fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6);
+ const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
struct siginfo info;
- if (!inf->fn(addr, fsr, regs))
+ if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
return;
printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
@@ -508,6 +522,6 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
{
- do_translation_fault(addr, 0, regs);
+ do_translation_fault(addr, FSR_LNX_PF, regs);
}
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 575f3ad722e..b27942909b2 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -50,7 +50,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
void flush_cache_mm(struct mm_struct *mm)
{
if (cache_is_vivt()) {
- if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
__cpuc_flush_user_all();
return;
}
@@ -73,7 +73,7 @@ void flush_cache_mm(struct mm_struct *mm)
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
if (cache_is_vivt()) {
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
vma->vm_flags);
return;
@@ -97,7 +97,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
if (cache_is_vivt()) {
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
unsigned long addr = user_addr & PAGE_MASK;
__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
}
@@ -113,7 +113,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long len, int write)
{
if (cache_is_vivt()) {
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
unsigned long addr = (unsigned long)kaddr;
__cpuc_coherent_kern_range(addr, addr + len);
}
@@ -126,7 +126,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
}
/* VIPT non-aliasing cache */
- if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
vma->vm_flags & VM_EXEC) {
unsigned long addr = (unsigned long)kaddr;
/* only flushing the kernel mapping on non-aliasing VIPT */
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ea36186f32c..877c492f8e1 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -596,8 +596,8 @@ void __init mem_init(void)
printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
"%dK data, %dK init, %luK highmem)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
- codesize >> 10, datasize >> 10, initsize >> 10,
+ nr_free_pages() << (PAGE_SHIFT-10), codesize >> 10,
+ datasize >> 10, initsize >> 10,
(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
@@ -613,6 +613,14 @@ void __init mem_init(void)
void free_initmem(void)
{
+#ifdef CONFIG_HAVE_TCM
+ extern char *__tcm_start, *__tcm_end;
+
+ totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)),
+ __phys_to_pfn(__pa(__tcm_end)),
+ "TCM link");
+#endif
+
if (!machine_is_integrator() && !machine_is_cintegrator())
totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
__phys_to_pfn(__pa(__init_end)),
"init");
}
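
The TCM hunk hands the pages occupied by the link-time TCM copy back to the
page allocator with the same free_area() helper used for init memory. That
helper lives earlier in arch/arm/mm/init.c and is not part of this diff; a
minimal sketch of what such a helper does, under that assumption:

    /* sketch only: free a [pfn, end) range that was reserved at boot */
    static inline int free_area(unsigned long pfn, unsigned long end, char *s)
    {
        unsigned int pages = 0;

        for (; pfn < end; pfn++) {
            struct page *page = pfn_to_page(pfn);
            ClearPageReserved(page);   /* drop the boot-time reservation */
            init_page_count(page);     /* reset refcount so it can be freed */
            __free_page(page);         /* return it to the buddy allocator */
            pages++;
        }

        if (s)
            printk(KERN_INFO "Freeing %s memory: %dK\n", s,
                   pages << (PAGE_SHIFT - 10));

        return pages;
    }
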