Diffstat (limited to 'include/linux/mmzone.h')
 include/linux/mmzone.h | 110 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 102 insertions(+), 8 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 4e5627379b0..4c4522a51a3 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -7,12 +7,14 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
+#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
+#include <linux/pageblock-flags.h>
#include <asm/atomic.h>
#include <asm/page.h>
@@ -32,8 +34,29 @@
*/
#define PAGE_ALLOC_COSTLY_ORDER 3
+#define MIGRATE_UNMOVABLE 0
+#define MIGRATE_RECLAIMABLE 1
+#define MIGRATE_MOVABLE 2
+#define MIGRATE_RESERVE 3
+#define MIGRATE_ISOLATE 4 /* can't allocate from here */
+#define MIGRATE_TYPES 5
+
+#define for_each_migratetype_order(order, type) \
+ for (order = 0; order < MAX_ORDER; order++) \
+ for (type = 0; type < MIGRATE_TYPES; type++)
+
+extern int page_group_by_mobility_disabled;
+
+static inline int get_pageblock_migratetype(struct page *page)
+{
+ if (unlikely(page_group_by_mobility_disabled))
+ return MIGRATE_UNMOVABLE;
+
+ return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
+}
+
struct free_area {
- struct list_head free_list;
+ struct list_head free_list[MIGRATE_TYPES];
unsigned long nr_free;
};
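With the free lists split per migratetype, every consumer of zone->free_area[order].free_list must now pick a list by type. Roughly how the accompanying mm/page_alloc.c changes initialise them with the new iterator (a sketch; the function name is illustrative):

static void sketch_zone_init_free_lists(struct zone *zone)
{
	unsigned int order, t;

	/* Visit every (order, migratetype) pair exactly once. */
	for_each_migratetype_order(order, t) {
		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
		zone->free_area[order].nr_free = 0;
	}
}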
@@ -222,6 +245,14 @@ struct zone {
#endif
struct free_area free_area[MAX_ORDER];
+#ifndef CONFIG_SPARSEMEM
+ /*
+ * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
+ * In SPARSEMEM, this map is stored in struct mem_section
+ */
+ unsigned long *pageblock_flags;
+#endif /* CONFIG_SPARSEMEM */
+
ZONE_PADDING(_pad1_)
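The pageblock_flags bitmap packs NR_PAGEBLOCK_BITS per pageblock_nr_pages block. A sketch of the reader side behind get_pageblock_flags_group(), assuming the get_pageblock_bitmap() and pfn_to_bitidx() lookup helpers this series adds in mm/page_alloc.c:

static unsigned long sketch_get_flags(struct zone *zone, unsigned long pfn,
					int start_bitidx, int end_bitidx)
{
	unsigned long *bitmap = get_pageblock_bitmap(zone, pfn);
	unsigned long bitidx = pfn_to_bitidx(zone, pfn);
	unsigned long flags = 0, value = 1;

	/* Gather bits [start_bitidx, end_bitidx] for this pageblock. */
	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (test_bit(bitidx + start_bitidx, bitmap))
			flags |= value;
	return flags;
}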
@@ -232,10 +263,7 @@ struct zone {
unsigned long nr_scan_active;
unsigned long nr_scan_inactive;
unsigned long pages_scanned; /* since last reclaim */
- int all_unreclaimable; /* All pages pinned */
-
- /* A count of how many reclaimers are scanning this zone */
- atomic_t reclaim_in_progress;
+ unsigned long flags; /* zone flags, see below */
/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
@@ -313,6 +341,42 @@ struct zone {
const char *name;
} ____cacheline_internodealigned_in_smp;
+typedef enum {
+ ZONE_ALL_UNRECLAIMABLE, /* all pages pinned */
+ ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */
+ ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */
+} zone_flags_t;
+
+static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
+{
+ set_bit(flag, &zone->flags);
+}
+
+static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
+{
+ return test_and_set_bit(flag, &zone->flags);
+}
+
+static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
+{
+ clear_bit(flag, &zone->flags);
+}
+
+static inline int zone_is_all_unreclaimable(const struct zone *zone)
+{
+ return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
+}
+
+static inline int zone_is_reclaim_locked(const struct zone *zone)
+{
+ return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
+}
+
+static inline int zone_is_oom_locked(const struct zone *zone)
+{
+ return test_bit(ZONE_OOM_LOCKED, &zone->flags);
+}
+
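The test-and-set variant lets a reclaimer claim a zone atomically; roughly how mm/vmscan.c replaces the old reclaim_in_progress counter with ZONE_RECLAIM_LOCKED (a sketch of the zone_reclaim() call site):

	/* Only one task may run zone reclaim on a zone at a time. */
	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
		return 0;	/* another reclaimer beat us to it */

	ret = __zone_reclaim(zone, gfp_mask, order);

	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);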
/*
* The "priority" of VM scanning is how much of the queues we will scan in one
* go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
@@ -324,6 +388,17 @@ struct zone {
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
#ifdef CONFIG_NUMA
+
+/*
+ * The NUMA zonelists are doubled because we need zonelists that restrict the
+ * allocations to a single node for GFP_THISNODE.
+ *
+ * [0 .. MAX_NR_ZONES - 1]             : Zonelists with fallback
+ * [MAX_NR_ZONES .. MAX_ZONELISTS - 1] : No fallback (GFP_THISNODE)
+ */
+#define MAX_ZONELISTS (2 * MAX_NR_ZONES)
+
+
/*
* We cache key information from each zonelist for smaller cache
* footprint when scanning for free pages in get_page_from_freelist().
@@ -389,6 +464,7 @@ struct zonelist_cache {
unsigned long last_full_zap; /* when last zap'd (jiffies) */
};
#else
+#define MAX_ZONELISTS MAX_NR_ZONES
struct zonelist_cache;
#endif
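For illustration, the indexing convention described above could be wrapped in a helper like this (hypothetical, not part of this patch, though gfp_zone(), __GFP_THISNODE and pg_data_t are existing kernel symbols):

static inline struct zonelist *sketch_node_zonelist(pg_data_t *pgdat,
						gfp_t flags)
{
	int idx = gfp_zone(flags);

#ifdef CONFIG_NUMA
	/* The upper MAX_NR_ZONES entries carry no fallback zones. */
	if (flags & __GFP_THISNODE)
		idx += MAX_NR_ZONES;
#endif
	return &pgdat->node_zonelists[idx];
}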
@@ -455,7 +531,7 @@ extern struct page *mem_map;
struct bootmem_data;
typedef struct pglist_data {
struct zone node_zones[MAX_NR_ZONES];
- struct zonelist node_zonelists[MAX_NR_ZONES];
+ struct zonelist node_zonelists[MAX_ZONELISTS];
int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
struct page *node_mem_map;
@@ -708,6 +784,9 @@ extern struct zone *next_zone(struct zone *zone);
#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
+#define SECTION_BLOCKFLAGS_BITS \
+ ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
+
#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif
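To make the arithmetic concrete, assume typical x86-64 values: SECTION_SIZE_BITS = 27 (128MB sections) and PAGE_SHIFT = 12 give PFN_SECTION_SHIFT = 15; take pageblock_order = MAX_ORDER - 1 = 10 (no huge pages configured) and NR_PAGEBLOCK_BITS = 4. Then:

	SECTION_BLOCKFLAGS_BITS = (1UL << (15 - 10)) * 4
	                        = 32 pageblocks * 4 bits
	                        = 128 bits, i.e. two 64-bit longs per section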
@@ -727,6 +806,9 @@ struct mem_section {
* before using it wrong.
*/
unsigned long section_mem_map;
+
+ /* See declaration of similar field in struct zone */
+ unsigned long *pageblock_flags;
};
#ifdef CONFIG_SPARSEMEM_EXTREME
@@ -771,12 +853,17 @@ static inline struct page *__section_mem_map_addr(struct mem_section *section)
return (struct page *)map;
}
-static inline int valid_section(struct mem_section *section)
+static inline int present_section(struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}
-static inline int section_has_mem_map(struct mem_section *section)
+static inline int present_section_nr(unsigned long nr)
+{
+ return present_section(__nr_to_section(nr));
+}
+
+static inline int valid_section(struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}
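The present/valid split matters during early SPARSEMEM bring-up: a section is marked present as soon as its memory is known to exist, but only becomes valid once a mem_map has been allocated for it. A sketch of the init-time walk this enables (the real loop lives in mm/sparse.c):

	unsigned long nr;

	/* Walk sections that have memory before any mem_map exists;
	 * valid_section()/pfn_valid() would still say no here. */
	for (nr = 0; nr < NR_MEM_SECTIONS; nr++) {
		if (!present_section_nr(nr))
			continue;
		/* ... allocate and hook up this section's mem_map ... */
	}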
@@ -798,6 +885,13 @@ static inline int pfn_valid(unsigned long pfn)
return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
+static inline int pfn_present(unsigned long pfn)
+{
+ if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+ return 0;
+ return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
+}
+
/*
* These are _only_ used during initialisation, therefore they
* can use __initdata ... They could have names to indicate
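Likewise, pfn_present() is the right probe when a caller only needs to know that memory exists at a pfn, not that the pfn already has a struct page behind it. A hypothetical helper, purely for illustration:

static int sketch_range_present(unsigned long start_pfn,
				unsigned long nr_pages)
{
	/* Start from the section-aligned base so one check per
	 * backing section covers the whole range. */
	unsigned long pfn = start_pfn & PAGE_SECTION_MASK;

	for (; pfn < start_pfn + nr_pages; pfn += PAGES_PER_SECTION)
		if (!pfn_present(pfn))
			return 0;
	return 1;
}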