author:    Johannes Weiner <hannes@saeurebad.de>            2008-12-01 03:00:35 +0100
committer: Linus Torvalds <torvalds@linux-foundation.org>   2008-12-01 07:58:06 -0800
commit:    2a1dc509747fdcfdf3a2df818a14908aed86c3d4
tree:      4dd97b3090d314b42f793a48251497f34835a5e8
parent:    33b07db9f38fe73b3895f8d4db8fdee03e3afec3
vmscan: protect zone rotation stats by lru lock
The zone's rotation statistics must not be accessed without the
corresponding LRU lock held. Fix an unprotected write in
shrink_active_list().
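For illustration, a minimal userspace sketch of the invariant this patch restores: the recent_rotated[] counters are only written while the zone's LRU lock is held. The struct, field, and function names merely mirror mm/vmscan.c; a pthread mutex stands in for the kernel spinlock and the helper account_rotated() is hypothetical, so this is an analogy rather than the actual kernel code.

/*
 * Userspace analogy of the locking rule enforced by this patch:
 * per-zone rotation statistics are updated only under the LRU lock.
 * Simplified sketch; not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

struct zone {
	pthread_mutex_t lru_lock;        /* stands in for spinlock_t lru_lock */
	unsigned long recent_rotated[2]; /* [0] = anon, [1] = file */
};

/* Account pages that were referenced and therefore count as "rotated". */
static void account_rotated(struct zone *zone, int file, unsigned long pgmoved)
{
	/*
	 * Take the lock *before* touching the statistics; the bug fixed
	 * here was exactly such a write happening without the lock.
	 */
	pthread_mutex_lock(&zone->lru_lock);
	zone->recent_rotated[!!file] += pgmoved;
	pthread_mutex_unlock(&zone->lru_lock);
}

int main(void)
{
	struct zone zone = { .lru_lock = PTHREAD_MUTEX_INITIALIZER };

	account_rotated(&zone, 1, 32);   /* 32 file pages rotated */
	account_rotated(&zone, 0, 8);    /* 8 anon pages rotated */

	printf("anon rotated: %lu, file rotated: %lu\n",
	       zone.recent_rotated[0], zone.recent_rotated[1]);
	return 0;
}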
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  mm/vmscan.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7ea1440b53d..62e7f62fb55 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1248,6 +1248,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		list_add(&page->lru, &l_inactive);
 	}
 
+	spin_lock_irq(&zone->lru_lock);
 	/*
	 * Count referenced pages from currently used mappings as
	 * rotated, even though they are moved to the inactive list.
@@ -1263,7 +1264,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	pgmoved = 0;
 	lru = LRU_BASE + file * LRU_FILE;
 
-	spin_lock_irq(&zone->lru_lock);
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
 		prefetchw_prev_lru_page(page, &l_inactive, flags);