| author | Christoph Lameter <clameter@sgi.com> | 2006-09-25 23:31:55 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-09-26 08:48:52 -0700 |
| commit | 89fa30242facca249aead2aac03c4c69764f911c (patch) | |
| tree | 1ac46b4777b819f2a4793d8e37330576ae5089ec | |
| parent | 4415cc8df630b05d3a54267d5f3e5c0b63a4ec05 (diff) | |
[PATCH] NUMA: Add zone_to_nid function
There are many places where we need to determine the node of a zone.
Currently we use a difficult-to-read sequence of pointer dereferences.
Put that into an inline function and use it throughout the VM. Maybe we
can find a way to optimize the lookup in the future.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
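For context, here is a minimal, self-contained sketch of the pattern this patch introduces. The struct definitions below are simplified stand-ins for the kernel's pg_data_t and struct zone (the real definitions live in include/linux/mmzone.h), so treat this as an illustration of the refactor, not actual kernel code:

```c
#include <stdio.h>

/* Simplified stand-ins for the kernel's types; the real definitions
 * live in include/linux/mmzone.h. */
struct pglist_data {
	int node_id;			/* NUMA node this pgdat describes */
};

struct zone {
	struct pglist_data *zone_pgdat;	/* back-pointer to the owning node */
};

/* The helper added by this patch: one readable name in place of a
 * two-level pointer dereference at every call site. */
static inline unsigned long zone_to_nid(struct zone *zone)
{
	return zone->zone_pgdat->node_id;
}

int main(void)
{
	struct pglist_data pgdat = { .node_id = 1 };
	struct zone z = { .zone_pgdat = &pgdat };

	/* Before the patch, call sites spelled this out as
	 * zone->zone_pgdat->node_id; after it, they read: */
	printf("zone is on node %lu\n", zone_to_nid(&z));
	return 0;
}
```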
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/i386/mm/discontig.c | 2 |
| -rw-r--r-- | arch/parisc/mm/init.c | 2 |
| -rw-r--r-- | include/linux/mm.h | 7 |
| -rw-r--r-- | kernel/cpuset.c | 4 |
| -rw-r--r-- | mm/hugetlb.c | 2 |
| -rw-r--r-- | mm/mempolicy.c | 6 |
| -rw-r--r-- | mm/oom_kill.c | 3 |
| -rw-r--r-- | mm/page_alloc.c | 2 |
| -rw-r--r-- | mm/vmscan.c | 2 |

9 files changed, 17 insertions(+), 13 deletions(-)
```diff
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index 07c300f9376..fb5d8b747de 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -422,7 +422,7 @@ void __init set_highmem_pages_init(int bad_ppro)
 		zone_end_pfn = zone_start_pfn + zone->spanned_pages;
 
 		printk("Initializing %s for node %d (%08lx:%08lx)\n",
-				zone->name, zone->zone_pgdat->node_id,
+				zone->name, zone_to_nid(zone),
 				zone_start_pfn, zone_end_pfn);
 
 		for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) {
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index c7329615ef9..25ad28d63e8 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -551,7 +551,7 @@ void show_mem(void)
 
 			printk("Zone list for zone %d on node %d: ", j, i);
 			for (k = 0; zl->zones[k] != NULL; k++)
-				printk("[%d/%s] ", zl->zones[k]->zone_pgdat->node_id, zl->zones[k]->name);
+				printk("[%d/%s] ", zone_to_nid(zl->zones[k]), zl->zones[k]->name);
 			printk("\n");
 		}
 	}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f2018775b99..856f0ee7e84 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -499,12 +499,17 @@ static inline struct zone *page_zone(struct page *page)
 	return zone_table[page_zone_id(page)];
 }
 
+static inline unsigned long zone_to_nid(struct zone *zone)
+{
+	return zone->zone_pgdat->node_id;
+}
+
 static inline unsigned long page_to_nid(struct page *page)
 {
 	if (FLAGS_HAS_NODE)
 		return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 	else
-		return page_zone(page)->zone_pgdat->node_id;
+		return zone_to_nid(page_zone(page));
 }
 static inline unsigned long page_to_section(struct page *page)
 {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 76940361273..cff41511269 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2245,7 +2245,7 @@ int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
 	int i;
 
 	for (i = 0; zl->zones[i]; i++) {
-		int nid = zl->zones[i]->zone_pgdat->node_id;
+		int nid = zone_to_nid(zl->zones[i]);
 
 		if (node_isset(nid, current->mems_allowed))
 			return 1;
@@ -2318,7 +2318,7 @@ int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
 
 	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
 		return 1;
-	node = z->zone_pgdat->node_id;
+	node = zone_to_nid(z);
 	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
 	if (node_isset(node, current->mems_allowed))
 		return 1;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3aceadce1a7..7c7d03dbf73 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -72,7 +72,7 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
 	struct zone **z;
 
 	for (z = zonelist->zones; *z; z++) {
-		nid = (*z)->zone_pgdat->node_id;
+		nid = zone_to_nid(*z);
 		if (cpuset_zone_allowed(*z, GFP_HIGHUSER) &&
 		    !list_empty(&hugepage_freelists[nid]))
 			break;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8002e1faccd..38f89650bc8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -487,7 +487,7 @@ static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
 	switch (p->policy) {
 	case MPOL_BIND:
 		for (i = 0; p->v.zonelist->zones[i]; i++)
-			node_set(p->v.zonelist->zones[i]->zone_pgdat->node_id,
+			node_set(zone_to_nid(p->v.zonelist->zones[i]),
 				*nodes);
 		break;
 	case MPOL_DEFAULT:
@@ -1145,7 +1145,7 @@ unsigned slab_node(struct mempolicy *policy)
 		 * Follow bind policy behavior and start allocation at the
 		 * first node.
 		 */
-		return policy->v.zonelist->zones[0]->zone_pgdat->node_id;
+		return zone_to_nid(policy->v.zonelist->zones[0]);
 
 	case MPOL_PREFERRED:
 		if (policy->v.preferred_node >= 0)
@@ -1649,7 +1649,7 @@ void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
 
 		nodes_clear(nodes);
 		for (z = pol->v.zonelist->zones; *z; z++)
-			node_set((*z)->zone_pgdat->node_id, nodes);
+			node_set(zone_to_nid(*z), nodes);
 		nodes_remap(tmp, nodes, *mpolmask, *newmask);
 		nodes = tmp;
 
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f1c0ef1fd21..bada3d03119 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -177,8 +177,7 @@ static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
 
 	for (z = zonelist->zones; *z; z++)
 		if (cpuset_zone_allowed(*z, gfp_mask))
-			node_clear((*z)->zone_pgdat->node_id,
-					nodes);
+			node_clear(zone_to_nid(*z), nodes);
 		else
 			return CONSTRAINT_CPUSET;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cf913bdd433..51070b6d593 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1217,7 +1217,7 @@ unsigned int nr_free_pagecache_pages(void)
 #ifdef CONFIG_NUMA
 static void show_node(struct zone *zone)
 {
-	printk("Node %d ", zone->zone_pgdat->node_id);
+	printk("Node %ld ", zone_to_nid(zone));
 }
 #else
 #define show_node(zone) do { } while (0)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b950f193816..87779dda4ec 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1661,7 +1661,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	 * over remote processors and spread off node memory allocations
 	 * as wide as possible.
 	 */
-	node_id = zone->zone_pgdat->node_id;
+	node_id = zone_to_nid(zone);
 	mask = node_to_cpumask(node_id);
 	if (!cpus_empty(mask) && node_id != numa_node_id())
 		return 0;
```
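A side effect worth noting: because zone_to_nid() returns unsigned long rather than int, the printk format in mm/page_alloc.c's show_node() changes from %d to %ld to match the new type.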