Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--  include/linux/mmzone.h  64
1 file changed, 38 insertions(+), 26 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9f22090df7d..93a849f742d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -38,7 +38,7 @@ struct pglist_data;
#if defined(CONFIG_SMP)
struct zone_padding {
char x[0];
-} ____cacheline_maxaligned_in_smp;
+} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name) struct zone_padding name;
#else
#define ZONE_PADDING(name)
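The renamed attribute makes the intent explicit: the pads must align to the internode cacheline boundary, not merely the SMP one. A minimal sketch of how ZONE_PADDING is used inside struct zone (the field names here are illustrative, not taken from this patch):

	struct padding_example {
		spinlock_t	lock;		/* hot for the allocator */
		ZONE_PADDING(_pad1_)
		spinlock_t	lru_lock;	/* hot for reclaim */
	};

Since zone_padding is a zero-length array, the pad costs no storage; it only pushes the following field onto its own set of cachelines so allocator and reclaim traffic do not false-share.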
@@ -46,7 +46,6 @@ struct zone_padding {
struct per_cpu_pages {
int count; /* number of pages in the list */
- int low; /* low watermark, refill needed */
int high; /* high watermark, emptying needed */
int batch; /* chunk size for buddy add/remove */
struct list_head list; /* the list of pages */
};
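With the ->low watermark gone, the per-cpu list is governed by ->high and ->batch alone. A sketch of the resulting free-side behaviour (simplified, with a hypothetical drain helper rather than the exact mm/page_alloc.c code):

	static void pcp_free_sketch(struct per_cpu_pages *pcp, struct page *page)
	{
		list_add(&page->lru, &pcp->list);
		pcp->count++;
		if (pcp->count >= pcp->high)
			/* hand ->batch pages back to the buddy lists */
			drain_batch_to_buddy(pcp);	/* hypothetical helper */
	}

Refills now happen only when an allocation finds the list empty, so there is nothing left for a separate low watermark to trigger.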
@@ -99,7 +98,7 @@ struct per_cpu_pageset {
/*
* On machines where it is needed (eg PCs) we divide physical memory
- * into multiple physical zones. On a PC we have 4 zones:
+ * into multiple physical zones. On a 32bit PC we have 4 zones:
*
* ZONE_DMA < 16 MB ISA DMA capable memory
* ZONE_DMA32 0 MB Empty
@@ -150,15 +149,17 @@ struct zone {
unsigned long pages_scanned; /* since last reclaim */
int all_unreclaimable; /* All pages pinned */
- /*
- * Does the allocator try to reclaim pages from the zone as soon
- * as it fails a watermark_ok() in __alloc_pages?
- */
- int reclaim_pages;
/* A count of how many reclaimers are scanning this zone */
atomic_t reclaim_in_progress;
/*
+ * timestamp (in jiffies) of the last zone reclaim that did not
+ * result in freeing of pages. This is used to avoid repeated scans
+ * if all memory in the zone is in use.
+ */
+ unsigned long last_unsuccessful_zone_reclaim;
+
+ /*
* prev_priority holds the scanning priority for this zone. It is
* defined as the scanning priority at which we achieved our reclaim
* target at the previous try_to_free_pages() or balance_pgdat()
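A sketch of how the new timestamp is meant to be consulted by zone reclaim (the 30-second interval is an assumption modeled on the reclaim code this field supports, not part of this header change):

	#define ZONE_RECLAIM_INTERVAL	(30 * HZ)	/* assumed backoff */

	static int zone_reclaim_sketch(struct zone *zone)
	{
		/* A recent scan freed nothing; don't burn CPU rescanning. */
		if (time_before(jiffies, zone->last_unsuccessful_zone_reclaim +
				ZONE_RECLAIM_INTERVAL))
			return 0;
		/* ... attempt reclaim; if nothing was freed, record it: */
		zone->last_unsuccessful_zone_reclaim = jiffies;
		return 0;
	}

time_before() is used rather than a plain comparison so that jiffies wraparound is handled correctly.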
@@ -234,7 +235,7 @@ struct zone {
* rarely used fields:
*/
char *name;
-} ____cacheline_maxaligned_in_smp;
+} ____cacheline_internodealigned_in_smp;
/*
@@ -389,6 +390,11 @@ static inline struct zone *next_zone(struct zone *zone)
#define for_each_zone(zone) \
for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))
+static inline int populated_zone(struct zone *zone)
+{
+ return (!!zone->present_pages);
+}
+
static inline int is_highmem_idx(int idx)
{
return (idx == ZONE_HIGHMEM);
}
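populated_zone() gives callers a readable test for zones that actually contain pages; the double negation folds present_pages down to a clean 0/1. Typical use, as a sketch:

	struct zone *zone;

	for_each_zone(zone) {
		if (!populated_zone(zone))
			continue;	/* e.g. ZONE_DMA32 is empty on i386 */
		/* operate only on zones that really have memory */
	}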
@@ -398,6 +404,7 @@ static inline int is_normal_idx(int idx)
{
return (idx == ZONE_NORMAL);
}
+
/**
* is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
@@ -414,6 +421,16 @@ static inline int is_normal(struct zone *zone)
return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}
+static inline int is_dma32(struct zone *zone)
+{
+ return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
+}
+
+static inline int is_dma(struct zone *zone)
+{
+ return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
+}
+
/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
struct file;
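The two new helpers follow the same pattern as is_normal(): a zone's identity is its slot in the owning node's node_zones[] array, so a pointer comparison is enough and no flag field is needed. Open-coded, the test is equivalent to this sketch:

	/* true iff this zone sits in slot ZONE_DMA of its own pgdat */
	int zone_is_dma = (zone == &zone->zone_pgdat->node_zones[ZONE_DMA]);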
@@ -422,6 +439,8 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
+int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
+ void __user *, size_t *, loff_t *);
#include <linux/topology.h>
/* Returns the number of the current Node. */
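The new prototype backs a writable sysctl that sizes each zone's per-cpu high watermark as a fraction of the zone. A sketch of the per-pageset recomputation such a handler would drive (the names and the batch rule are assumptions, not quoted from this patch):

	static void setup_pcp_highmark_sketch(struct zone *zone,
					      struct per_cpu_pages *pcp,
					      int fraction)
	{
		/* larger fractions mean smaller per-cpu hot lists */
		pcp->high = zone->present_pages / fraction;
		pcp->batch = max(1, pcp->high / 4);	/* assumed refill chunk */
	}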
@@ -435,7 +454,6 @@ extern struct pglist_data contig_page_data;
#define NODE_DATA(nid) (&contig_page_data)
#define NODE_MEM_MAP(nid) mem_map
#define MAX_NODES_SHIFT 1
-#define pfn_to_nid(pfn) (0)
#else /* CONFIG_NEED_MULTIPLE_NODES */
@@ -470,6 +488,10 @@ extern struct pglist_data contig_page_data;
#define early_pfn_to_nid(nid) (0UL)
#endif
+#ifdef CONFIG_FLATMEM
+#define pfn_to_nid(pfn) (0)
+#endif
+
#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
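With a flat memory model there is only node 0, so pfn_to_nid() can collapse to a constant. The section macros above are plain shifts; a worked example under an assumed PFN_SECTION_SHIFT of 15 (128MB sections with 4KB pages):

	unsigned long sec = pfn_to_section_nr(0x48000UL);	/* 0x48000 >> 15 == 9 */
	unsigned long pfn = section_nr_to_pfn(sec);		/* 9 << 15 == 0x48000 */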
@@ -564,11 +586,6 @@ static inline int valid_section_nr(unsigned long nr)
return valid_section(__nr_to_section(nr));
}
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
-
static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
return __nr_to_section(pfn_to_section_nr(pfn));
@@ -598,13 +615,14 @@ static inline int pfn_valid(unsigned long pfn)
* this restriction.
*/
#ifdef CONFIG_NUMA
-#define pfn_to_nid early_pfn_to_nid
-#endif
-
-#define pfn_to_pgdat(pfn) \
+#define pfn_to_nid(pfn) \
({ \
- NODE_DATA(pfn_to_nid(pfn)); \
+ unsigned long __pfn_to_nid_pfn = (pfn); \
+ page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
})
+#else
+#define pfn_to_nid(pfn) (0)
+#endif
#define early_pfn_valid(pfn) pfn_valid(pfn)
void sparse_init(void);
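The NUMA definition now derives the node from the page itself: pfn_to_page() locates the struct page and page_to_nid() reads the node id encoded in page->flags. The temporary __pfn_to_nid_pfn makes the macro evaluate its argument exactly once, so something like pfn_to_nid(pfn++) stays safe. An inline-function equivalent, as a sketch:

	static inline int pfn_to_nid_sketch(unsigned long pfn)
	{
		return page_to_nid(pfn_to_page(pfn));
	}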
@@ -613,12 +631,6 @@ void sparse_init(void);
#define sparse_index_init(_sec, _nid) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
-#ifdef CONFIG_NODES_SPAN_OTHER_NODES
-#define early_pfn_in_nid(pfn, nid) (early_pfn_to_nid(pfn) == (nid))
-#else
-#define early_pfn_in_nid(pfn, nid) (1)
-#endif
-
#ifndef early_pfn_valid
#define early_pfn_valid(pfn) (1)
#endif