-rw-r--r--	include/linux/memcontrol.h	16
-rw-r--r--	mm/memcontrol.c	29
-rw-r--r--	mm/swap.c	34
-rw-r--r--	mm/vmscan.c	27
4 files changed, 83 insertions, 23 deletions
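
This patch gives each memory cgroup its own per-zone reclaim statistics rather than having memcg reclaim share (or skip) the global per-zone counters. It adds a zone_reclaim_stat to struct mem_cgroup_per_zone, exposes it through mem_cgroup_get_reclaim_stat() and mem_cgroup_get_reclaim_stat_from_page(), funnels the LRU bookkeeping in mm/swap.c through a new update_page_reclaim_stat() helper that updates both the zone-level and (when present) the memcg-level counters, and drops the scan_global_lru() guards in mm/vmscan.c so the recent_scanned/recent_rotated accounting also runs during memcg reclaim. For reference, zone_reclaim_stat in this kernel series (include/linux/mmzone.h) looks roughly like this; the comment below is paraphrased:

struct zone_reclaim_stat {
	/*
	 * Tracks how many of the anon (swap-backed) and file-backed
	 * pages taken off an LRU were recently referenced.  The higher
	 * the rotated/scanned ratio, the more valuable that cache is.
	 *
	 * anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};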
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index b1defd6a278..36b8ebb39b8 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -105,6 +105,10 @@ int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
 				       struct zone *zone,
 				       enum lru_list lru);
+struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
+						      struct zone *zone);
+struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat_from_page(struct page *page);
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
@@ -271,6 +275,18 @@ mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone,
 	return 0;
 }
 
+static inline struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
+{
+	return NULL;
+}
+
+static inline struct zone_reclaim_stat*
+mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+{
+	return NULL;
+}
+
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 313247e6c50..7b7f4dc0503 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -103,6 +103,8 @@ struct mem_cgroup_per_zone {
 	 */
 	struct list_head	lists[NR_LRU_LISTS];
 	unsigned long		count[NR_LRU_LISTS];
+
+	struct zone_reclaim_stat reclaim_stat;
 };
 /* Macro for accessing counter */
 #define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
@@ -458,6 +460,33 @@ unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
 	return MEM_CGROUP_ZSTAT(mz, lru);
 }
 
+struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
+						      struct zone *zone)
+{
+	int nid = zone->zone_pgdat->node_id;
+	int zid = zone_idx(zone);
+	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+
+	return &mz->reclaim_stat;
+}
+
+struct zone_reclaim_stat *
+mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+{
+	struct page_cgroup *pc;
+	struct mem_cgroup_per_zone *mz;
+
+	if (mem_cgroup_disabled())
+		return NULL;
+
+	pc = lookup_page_cgroup(page);
+	mz = page_cgroup_zoneinfo(pc);
+	if (!mz)
+		return NULL;
+
+	return &mz->reclaim_stat;
+}
+
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
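
Note that mem_cgroup_get_reclaim_stat_from_page() returns NULL when the memory controller is disabled or when the page has no per-zone memcg info, so every caller must tolerate a NULL result. A minimal sketch of the expected caller pattern, using a made-up helper name (the real caller added by this patch is update_page_reclaim_stat() in mm/swap.c below):

/*
 * Sketch only, not part of the patch: note_page_rotated() is invented
 * to show the NULL check that mem_cgroup_get_reclaim_stat_from_page()
 * requires of its callers.
 */
static void note_page_rotated(struct page *page, int file)
{
	struct zone_reclaim_stat *rs;

	rs = mem_cgroup_get_reclaim_stat_from_page(page);
	if (rs)
		rs->recent_rotated[file]++;
}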
diff --git a/mm/swap.c b/mm/swap.c
index 26b07e7bc3d..8adb9feb61e 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -151,13 +151,32 @@ void rotate_reclaimable_page(struct page *page)
 	}
 }
 
+static void update_page_reclaim_stat(struct zone *zone, struct page *page,
+				     int file, int rotated)
+{
+	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
+	struct zone_reclaim_stat *memcg_reclaim_stat;
+
+	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
+
+	reclaim_stat->recent_scanned[file]++;
+	if (rotated)
+		reclaim_stat->recent_rotated[file]++;
+
+	if (!memcg_reclaim_stat)
+		return;
+
+	memcg_reclaim_stat->recent_scanned[file]++;
+	if (rotated)
+		memcg_reclaim_stat->recent_rotated[file]++;
+}
+
 /*
  * FIXME: speed this up?
  */
 void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
-	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
 
 	spin_lock_irq(&zone->lru_lock);
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -170,8 +189,7 @@ void activate_page(struct page *page)
 		add_page_to_lru_list(zone, page, lru);
 		__count_vm_event(PGACTIVATE);
 
-		reclaim_stat->recent_rotated[!!file]++;
-		reclaim_stat->recent_scanned[!!file]++;
+		update_page_reclaim_stat(zone, page, !!file, 1);
 	}
 	spin_unlock_irq(&zone->lru_lock);
 }
@@ -386,7 +404,6 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 {
 	int i;
 	struct zone *zone = NULL;
-	struct zone_reclaim_stat *reclaim_stat = NULL;
 
 	VM_BUG_ON(is_unevictable_lru(lru));
 
@@ -394,24 +411,23 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 		struct page *page = pvec->pages[i];
 		struct zone *pagezone = page_zone(page);
 		int file;
+		int active;
 
 		if (pagezone != zone) {
 			if (zone)
 				spin_unlock_irq(&zone->lru_lock);
 			zone = pagezone;
-			reclaim_stat = &zone->reclaim_stat;
 			spin_lock_irq(&zone->lru_lock);
 		}
 		VM_BUG_ON(PageActive(page));
 		VM_BUG_ON(PageUnevictable(page));
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
+		active = is_active_lru(lru);
 		file = is_file_lru(lru);
-		reclaim_stat->recent_scanned[file]++;
-		if (is_active_lru(lru)) {
+		if (active)
 			SetPageActive(page);
-			reclaim_stat->recent_rotated[file]++;
-		}
+		update_page_reclaim_stat(zone, page, file, active);
 		add_page_to_lru_list(zone, page, lru);
 	}
 	if (zone)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d958d624d3a..56fc7abe4d2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -133,6 +133,9 @@ static DECLARE_RWSEM(shrinker_rwsem);
 static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
 						  struct scan_control *sc)
 {
+	if (!scan_global_lru(sc))
+		return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
+
 	return &zone->reclaim_stat;
 }
 
@@ -1087,17 +1090,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		__mod_zone_page_state(zone, NR_INACTIVE_ANON,
 						-count[LRU_INACTIVE_ANON]);
 
-		if (scan_global_lru(sc)) {
+		if (scan_global_lru(sc))
 			zone->pages_scanned += nr_scan;
-			reclaim_stat->recent_scanned[0] +=
-							count[LRU_INACTIVE_ANON];
-			reclaim_stat->recent_scanned[0] +=
-							count[LRU_ACTIVE_ANON];
-			reclaim_stat->recent_scanned[1] +=
-							count[LRU_INACTIVE_FILE];
-			reclaim_stat->recent_scanned[1] +=
-							count[LRU_ACTIVE_FILE];
-		}
+
+		reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
+		reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
+		reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
+		reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
+
 		spin_unlock_irq(&zone->lru_lock);
 
 		nr_scanned += nr_scan;
@@ -1155,7 +1155,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		SetPageLRU(page);
 		lru = page_lru(page);
 		add_page_to_lru_list(zone, page, lru);
-		if (PageActive(page) && scan_global_lru(sc)) {
+		if (PageActive(page)) {
 			int file = !!page_is_file_cache(page);
 			reclaim_stat->recent_rotated[file]++;
 		}
@@ -1230,8 +1230,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 */
 	if (scan_global_lru(sc)) {
 		zone->pages_scanned += pgscanned;
-		reclaim_stat->recent_scanned[!!file] += pgmoved;
 	}
+	reclaim_stat->recent_scanned[!!file] += pgmoved;
 
 	if (file)
 		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
@@ -1272,8 +1272,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 * This helps balance scan pressure between file and anonymous
 	 * pages in get_scan_ratio.
 	 */
-	if (scan_global_lru(sc))
-		reclaim_stat->recent_rotated[!!file] += pgmoved;
+	reclaim_stat->recent_rotated[!!file] += pgmoved;
 
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
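
With get_reclaim_stat() now memcg-aware, the same vmscan code serves both global and cgroup reclaim; callers never touch sc->mem_cgroup directly. A rough sketch of how a consumer such as get_scan_ratio() can weigh an LRU with these counters (the rotate_ratio() helper and its percentage form are invented for illustration):

/*
 * Sketch only: the real get_scan_ratio() in mm/vmscan.c combines the
 * rotated/scanned ratio with swappiness and other factors; this just
 * shows reading the statistics selected by get_reclaim_stat().
 */
static unsigned long rotate_ratio(struct zone *zone, struct scan_control *sc,
				  int file)
{
	struct zone_reclaim_stat *rs = get_reclaim_stat(zone, sc);

	if (!rs->recent_scanned[file])
		return 0;
	/* percentage of scanned pages that were rotated (re-referenced) */
	return rs->recent_rotated[file] * 100 / rs->recent_scanned[file];
}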