author     Russell King <rmk@dyn-67.arm.linux.org.uk>   2005-07-28 09:30:20 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2005-07-28 09:30:20 +0100
commit     661299d9d0437a0ff72240f3d60016ac3a361a6e (patch)
tree       765512576314fc3612b503f182b9ae4e60fcf849 /mm
parent     05caac585f8abd6c0113856bc8858e3ef214d8a6 (diff)
parent     41c018b7ecb60b1c2c4d5dee0cd37d32a94c45af (diff)
Merge with Linus' 2.6 tree
Diffstat (limited to 'mm')
-rw-r--r--  mm/madvise.c     13
-rw-r--r--  mm/memory.c      25
-rw-r--r--  mm/mempolicy.c    2
-rw-r--r--  mm/page_alloc.c   4
4 files changed, 24 insertions, 20 deletions
diff --git a/mm/madvise.c b/mm/madvise.c
index 73180a22877..c8c01a12fea 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -83,9 +83,6 @@ static long madvise_willneed(struct vm_area_struct * vma,
{
struct file *file = vma->vm_file;
- if (!file)
- return -EBADF;
-
if (file->f_mapping->a_ops->get_xip_page) {
/* no bad return value, but ignore advice */
return 0;
@@ -140,11 +137,16 @@ static long madvise_dontneed(struct vm_area_struct * vma,
return 0;
}
-static long madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
- unsigned long start, unsigned long end, int behavior)
+static long
+madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
+ unsigned long start, unsigned long end, int behavior)
{
+ struct file *filp = vma->vm_file;
long error = -EBADF;
+ if (!filp)
+ goto out;
+
switch (behavior) {
case MADV_NORMAL:
case MADV_SEQUENTIAL:
@@ -165,6 +167,7 @@ static long madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev
break;
}
+out:
return error;
}
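This hunk moves the vma->vm_file check out of madvise_willneed() and into madvise_vma(), so a mapping with no backing file now fails with -EBADF for every advice value instead of only MADV_WILLNEED. A minimal user-space sketch (not part of the patch) that would observe the new behaviour on an anonymous mapping:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* Under a kernel with this patch applied, madvise() on an anonymous
 * (file-less) mapping returns -1 with errno == EBADF, because
 * madvise_vma() now rejects VMAs whose vm_file is NULL. */
int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	if (madvise(p, len, MADV_WILLNEED) == -1)
		printf("madvise: %s\n", strerror(errno));	/* EBADF */
	munmap(p, len);
	return 0;
}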
diff --git a/mm/memory.c b/mm/memory.c
index beabdefa625..6fe77acbc1c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -776,8 +776,8 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
* Do a quick page-table lookup for a single page.
* mm->page_table_lock must be held.
*/
-static struct page *
-__follow_page(struct mm_struct *mm, unsigned long address, int read, int write)
+static struct page *__follow_page(struct mm_struct *mm, unsigned long address,
+ int read, int write, int accessed)
{
pgd_t *pgd;
pud_t *pud;
@@ -818,9 +818,11 @@ __follow_page(struct mm_struct *mm, unsigned long address, int read, int write)
pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
- if (write && !pte_dirty(pte) && !PageDirty(page))
- set_page_dirty(page);
- mark_page_accessed(page);
+ if (accessed) {
+ if (write && !pte_dirty(pte) && !PageDirty(page))
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ }
return page;
}
}
@@ -829,16 +831,19 @@ out:
return NULL;
}
-struct page *
+inline struct page *
follow_page(struct mm_struct *mm, unsigned long address, int write)
{
- return __follow_page(mm, address, /*read*/0, write);
+ return __follow_page(mm, address, 0, write, 1);
}
-int
-check_user_page_readable(struct mm_struct *mm, unsigned long address)
+/*
+ * check_user_page_readable() can be called from interrupt context by oprofile,
+ * so we need to avoid taking any non-irq-safe locks
+ */
+int check_user_page_readable(struct mm_struct *mm, unsigned long address)
{
- return __follow_page(mm, address, /*read*/1, /*write*/0) != NULL;
+ return __follow_page(mm, address, 1, 0, 0) != NULL;
}
EXPORT_SYMBOL(check_user_page_readable);
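The new fifth argument lets __follow_page() skip set_page_dirty() and mark_page_accessed() when the caller only needs a readability test; check_user_page_readable() passes accessed=0, which is what makes it usable from interrupt context as the comment above notes. A sketch of the kind of caller this enables, loosely modelled on oprofile's user-stack walker (the helper name is illustrative, not from the patch):

/* Illustrative interrupt-context caller (the function name is a
 * made-up example, not part of the patch): with accessed == 0,
 * __follow_page() neither dirties nor "touches" the page, so the
 * lookup takes no non-irq-safe locks. */
static int frame_readable(struct mm_struct *mm, unsigned long fp)
{
	return check_user_page_readable(mm, fp);
}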
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index cb41c31e7c8..1694845526b 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1138,11 +1138,11 @@ void mpol_free_shared_policy(struct shared_policy *p)
while (next) {
n = rb_entry(next, struct sp_node, nd);
next = rb_next(&n->nd);
+ rb_erase(&n->nd, &p->root);
mpol_free(n->policy);
kmem_cache_free(sn_cache, n);
}
spin_unlock(&p->lock);
- p->root = RB_ROOT;
}
/* assumes fs == KERNEL_DS */
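The mempolicy fix swaps resetting p->root after the loop for erasing each node as it is freed; note that rb_next() is taken before rb_erase(), so the walk never touches a freed node. The same erase-while-iterating pattern in isolation, as a sketch against the kernel rbtree API:

#include <linux/rbtree.h>
#include <linux/slab.h>

struct item {
	struct rb_node nd;
	/* payload ... */
};

/* Sketch of the erase-while-iterating pattern used above: fetch
 * the successor with rb_next() before rb_erase(), so freeing the
 * current node never invalidates the iteration. */
static void free_tree(struct rb_root *root)
{
	struct rb_node *next = rb_first(root);

	while (next) {
		struct item *it = rb_entry(next, struct item, nd);

		next = rb_next(&it->nd);	/* step first... */
		rb_erase(&it->nd, root);	/* ...then unlink... */
		kfree(it);			/* ...then free */
	}
}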
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1d6ba6a4b59..42bccfb8464 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1861,7 +1861,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
unsigned long *zones_size, unsigned long *zholes_size)
{
unsigned long i, j;
- const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
int cpu, nid = pgdat->node_id;
unsigned long zone_start_pfn = pgdat->node_start_pfn;
@@ -1934,9 +1933,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
zone->zone_mem_map = pfn_to_page(zone_start_pfn);
zone->zone_start_pfn = zone_start_pfn;
- if ((zone_start_pfn) & (zone_required_alignment-1))
- printk(KERN_CRIT "BUG: wrong zone alignment, it will crash\n");
-
memmap_init(size, nid, j, zone_start_pfn);
zonetable_add(zone, nid, j, zone_start_pfn, size);
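The page_alloc.c hunks drop the zone alignment sanity check along with the constant it tested against. For scale, a worked instance of the arithmetic the removed check performed (assuming MAX_ORDER == 11 and 4 KiB pages, the common defaults of the era; neither value appears in the patch):

#define MAX_ORDER 11	/* assumed here; config-dependent in real kernels */

/* The removed check required each zone to start on a maximal
 * buddy-block boundary: 1UL << (MAX_ORDER - 1) = 1024 pages,
 * i.e. 4 MiB with 4 KiB pages. It fired when any of the low
 * MAX_ORDER - 1 bits of the zone's start pfn were set. */
static int zone_misaligned(unsigned long zone_start_pfn)
{
	unsigned long align = 1UL << (MAX_ORDER - 1);

	return (zone_start_pfn & (align - 1)) != 0;
}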