From 9a865ffa34b6117a5e0b67640a084d8c2e198c93 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Fri, 30 Jun 2006 01:55:38 -0700
Subject: [PATCH] zoned vm counters: conversion of nr_slab to per zone counter

- Allows reclaim to access the counter without looping over per-processor
  counts.

- Allows accurate statistics on how many pages are used in a zone by the
  slab.  This may become useful to balance slab allocations over various
  zones.

[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 2 +-
 mm/slab.c       | 4 ++--
 mm/vmscan.c     | 2 +-
 mm/vmstat.c     | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

(limited to 'mm')

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8350720f98a..a38a11cfb48 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1318,7 +1318,7 @@ void show_free_areas(void)
 		ps.nr_writeback,
 		ps.nr_unstable,
 		nr_free_pages(),
-		ps.nr_slab,
+		global_page_state(NR_SLAB),
 		global_page_state(NR_FILE_MAPPED),
 		ps.nr_page_table_pages);
 
diff --git a/mm/slab.c b/mm/slab.c
index 0c33820038c..5dcfb904480 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1507,7 +1507,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	nr_pages = (1 << cachep->gfporder);
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		atomic_add(nr_pages, &slab_reclaim_pages);
-	add_page_state(nr_slab, nr_pages);
+	add_zone_page_state(page_zone(page), NR_SLAB, nr_pages);
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
 	return page_address(page);
@@ -1522,12 +1522,12 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
+	sub_zone_page_state(page_zone(page), NR_SLAB, nr_freed);
 	while (i--) {
 		BUG_ON(!PageSlab(page));
 		__ClearPageSlab(page);
 		page++;
 	}
-	sub_page_state(nr_slab, nr_freed);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
 	free_pages((unsigned long)addr, cachep->gfporder);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0960846d649..d6942436ac9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1362,7 +1362,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	for_each_zone(zone)
 		lru_pages += zone->nr_active + zone->nr_inactive;
 
-	nr_slab = read_page_state(nr_slab);
+	nr_slab = global_page_state(NR_SLAB);
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
 		reclaim_state.reclaimed_slab = 0;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 3baf4dffa62..dc9e6920922 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -398,13 +398,13 @@ static char *vmstat_text[] = {
 	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",
+	"nr_slab",
 
 	/* Page state */
 	"nr_dirty",
 	"nr_writeback",
 	"nr_unstable",
 	"nr_page_table_pages",
-	"nr_slab",
 
 	"pgpgin",
 	"pgpgout",
--
cgit v1.2.3
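
Editor's note: for readers unfamiliar with the zoned VM counter API this
patch converts to, the sketch below shows the shape of the scheme in plain,
userspace C. It is illustrative only, not the kernel implementation: the
names add_zone_page_state(), sub_zone_page_state() and global_page_state()
mirror the calls in the diff, but the real code (mm/vmstat.c) batches
updates through per-CPU deltas and also maintains a global vm_stat[] array
so the global read is cheap. Everything else here (the toy struct zone,
MAX_ZONES, the main() driver) is a hypothetical stand-in.

/*
 * Illustrative sketch only -- NOT the kernel implementation.
 * The kernel funnels updates through per-CPU deltas before folding
 * them into zone (and global) counters; this version updates the
 * zone counter directly to show the accounting model.
 */
#include <stdio.h>

enum zone_stat_item { NR_SLAB, NR_VM_ZONE_STAT_ITEMS };

#define MAX_ZONES 4

/* Hypothetical stand-in for the per-zone counter array. */
struct zone {
	long vm_stat[NR_VM_ZONE_STAT_ITEMS];
};

static struct zone zones[MAX_ZONES];

/* Per-zone update: one add per event, no per-CPU walk needed. */
static void add_zone_page_state(struct zone *z, enum zone_stat_item item,
				long delta)
{
	z->vm_stat[item] += delta;	/* kernel uses atomic/per-CPU ops */
}

static void sub_zone_page_state(struct zone *z, enum zone_stat_item item,
				long delta)
{
	z->vm_stat[item] -= delta;
}

/*
 * Global view: sum the per-zone counters.  Contrast with the old
 * read_page_state(), which had to iterate over every online CPU's
 * page state to produce a single number.
 */
static long global_page_state(enum zone_stat_item item)
{
	long total = 0;
	int i;

	for (i = 0; i < MAX_ZONES; i++)
		total += zones[i].vm_stat[item];
	return total;
}

int main(void)
{
	add_zone_page_state(&zones[0], NR_SLAB, 8);	/* cf. kmem_getpages() */
	add_zone_page_state(&zones[1], NR_SLAB, 4);
	sub_zone_page_state(&zones[0], NR_SLAB, 2);	/* cf. kmem_freepages() */

	printf("nr_slab = %ld\n", global_page_state(NR_SLAB));	/* prints 10 */
	return 0;
}

The point of the conversion is visible in global_page_state(): the old
read_page_state(nr_slab) looped over per-processor counts, whereas the
per-zone counters can be summed (or cached globally, as the kernel does)
without touching per-CPU data, and reclaim additionally gains an accurate
per-zone view of slab usage.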