author     David S. Miller <davem@sunset.davemloft.net>  2006-03-17 17:42:57 -0800
committer  David S. Miller <davem@sunset.davemloft.net>  2006-03-20 01:16:37 -0800
commit     05f9ca83596c7801549a2b4eba469d51baf5480f (patch)
tree       22270db01a13dda0af9b158662712f9e6b6a934c /arch/sparc64/kernel
parent     d61e16df940e02e25679bdc1aee8c25786f6de90 (diff)
[SPARC64]: Randomize mm->mmap_base when PF_RANDOMIZE is set.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--  arch/sparc64/kernel/sys_sparc.c  16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index 9019b41fc02..7a869138c37 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -25,6 +25,7 @@
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
+#include <linux/random.h>
#include <asm/uaccess.h>
#include <asm/ipc.h>
@@ -358,6 +359,17 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
/* Essentially the same as PowerPC... */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
+ unsigned long random_factor = 0UL;
+
+ if (current->flags & PF_RANDOMIZE) {
+ random_factor = get_random_int();
+ if (test_thread_flag(TIF_32BIT))
+ random_factor &= ((1 * 1024 * 1024) - 1);
+ else
+ random_factor = ((random_factor << PAGE_SHIFT) &
+ 0xffffffffUL);
+ }
+
/*
* Fall back to the standard layout if the personality
* bit is set, or if the expected stack growth is unlimited:
@@ -366,7 +378,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
(current->personality & ADDR_COMPAT_LAYOUT) ||
current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
sysctl_legacy_va_layout) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
mm->get_unmapped_area = arch_get_unmapped_area;
mm->unmap_area = arch_unmap_area;
} else {
@@ -380,7 +392,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
if (gap > (task_size / 6 * 5))
gap = (task_size / 6 * 5);
- mm->mmap_base = task_size - (gap & PAGE_MASK);
+ mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
mm->unmap_area = arch_unmap_area_topdown;
}
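
For reference, a minimal userspace sketch of the random_factor computation introduced above, assuming 8K pages (PAGE_SHIFT = 13) on sparc64; rand(), the placeholder base address, and the demo names stand in for the kernel's get_random_int() and TASK_UNMAPPED_BASE and are not the kernel's own definitions:

/*
 * Illustrative userspace sketch of the randomization above -- not kernel
 * code.  PAGE_SHIFT_ASSUMED and TASK_UNMAPPED_BASE_DEMO are placeholders;
 * rand() stands in for get_random_int().
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT_ASSUMED      13UL            /* assuming 8K pages on sparc64 */
#define TASK_UNMAPPED_BASE_DEMO 0x0000000070000000UL  /* placeholder base */

static unsigned long pick_random_factor(int is_32bit)
{
    unsigned long random_factor = (unsigned long)rand();

    if (is_32bit)
        /* 32-bit task: byte-granular offset kept below 1MB */
        random_factor &= ((1 * 1024 * 1024) - 1);
    else
        /* 64-bit task: page-granular offset, capped at 4GB */
        random_factor = (random_factor << PAGE_SHIFT_ASSUMED) &
                        0xffffffffUL;
    return random_factor;
}

int main(void)
{
    srand((unsigned int)time(NULL));

    /* Legacy (bottom-up) layout: the offset is added to the base. */
    printf("32-bit legacy mmap_base: 0x%lx\n",
           TASK_UNMAPPED_BASE_DEMO + pick_random_factor(1));
    printf("64-bit legacy mmap_base: 0x%lx\n",
           TASK_UNMAPPED_BASE_DEMO + pick_random_factor(0));
    return 0;
}

As the patch shows, a 32-bit task gets up to 1MB of randomization while a 64-bit task gets a page-granular offset of up to 4GB; the offset is added to TASK_UNMAPPED_BASE in the legacy layout and subtracted (with page alignment) from the gap-derived base in the top-down layout.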