Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--	include/linux/mmzone.h	34
1 file changed, 22 insertions, 12 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3ac040f19369..650ba2fb3301 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -140,25 +140,29 @@ enum lru_list {
 	NR_LRU_LISTS
 };
 
-#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)
+#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
 
-#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)
+#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
 
-static inline int is_file_lru(enum lru_list l)
+static inline int is_file_lru(enum lru_list lru)
 {
-	return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
+	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
 }
 
-static inline int is_active_lru(enum lru_list l)
+static inline int is_active_lru(enum lru_list lru)
 {
-	return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
+	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
 }
 
-static inline int is_unevictable_lru(enum lru_list l)
+static inline int is_unevictable_lru(enum lru_list lru)
 {
-	return (l == LRU_UNEVICTABLE);
+	return (lru == LRU_UNEVICTABLE);
 }
 
+struct lruvec {
+	struct list_head lists[NR_LRU_LISTS];
+};
+
 /* Mask used at gathering information at once (see memcontrol.c) */
 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
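
The hunk above does two things: it renames the iterator variable from the terse "l" to the more readable "lru", and it introduces struct lruvec, which gathers the per-LRU list heads into a single object. The minimal userspace sketch below shows how the renamed macros walk such a structure; list_head and enum lru_list are pared-down stand-ins for the kernel definitions, and main() is purely illustrative, not kernel code.

#include <stdio.h>

/* Pared-down stand-ins for the kernel's list_head and enum lru_list. */
struct list_head {
	struct list_head *next, *prev;
};

enum lru_list {
	LRU_INACTIVE_ANON,
	LRU_ACTIVE_ANON,
	LRU_INACTIVE_FILE,
	LRU_ACTIVE_FILE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
};

int main(void)
{
	struct lruvec lruvec;
	enum lru_list lru;

	/* Point each head at itself: the empty-list state the kernel
	 * sets up for each zone at boot. */
	for_each_lru(lru)
		lruvec.lists[lru].next = lruvec.lists[lru].prev =
						&lruvec.lists[lru];

	/* The evictable walk never touches the unevictable list. */
	for_each_evictable_lru(lru)
		printf("lru %d empty: %d\n", lru,
		       lruvec.lists[lru].next == &lruvec.lists[lru]);
	return 0;
}

Because LRU_UNEVICTABLE is the last enum entry, for_each_evictable_lru() can simply stop after LRU_ACTIVE_FILE.
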
@@ -173,6 +177,8 @@ static inline int is_unevictable_lru(enum lru_list l)
 #define ISOLATE_CLEAN		((__force isolate_mode_t)0x4)
 /* Isolate unmapped file */
 #define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x8)
+/* Isolate for asynchronous migration */
+#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x10)
 
 /* LRU Isolation modes. */
 typedef unsigned __bitwise__ isolate_mode_t;
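
ISOLATE_ASYNC_MIGRATE extends the existing power-of-two flag set: callers OR flags into an isolate_mode_t and the isolation code tests them with bitwise AND. Judging by its comment, the new flag marks requests coming from asynchronous (non-blocking) migration, so pages that would be expensive to migrate can be skipped. Below is a compilable userspace sketch of the flag arithmetic only; the sparse-only __bitwise__/__force annotations are dropped because they compile away outside the sparse checker.

#include <stdio.h>

/* Same values as the header above, minus the sparse annotations. */
typedef unsigned isolate_mode_t;

#define ISOLATE_CLEAN		((isolate_mode_t)0x4)
#define ISOLATE_UNMAPPED	((isolate_mode_t)0x8)
#define ISOLATE_ASYNC_MIGRATE	((isolate_mode_t)0x10)

int main(void)
{
	/* A caller that wants clean file pages it can migrate cheaply. */
	isolate_mode_t mode = ISOLATE_CLEAN | ISOLATE_ASYNC_MIGRATE;

	if (mode & ISOLATE_ASYNC_MIGRATE)
		printf("only pages cheap to migrate will be isolated\n");
	if (!(mode & ISOLATE_UNMAPPED))
		printf("mapped pages may be isolated too\n");
	return 0;
}
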
@@ -317,6 +323,12 @@ struct zone {
 	 */
 	unsigned long		lowmem_reserve[MAX_NR_ZONES];
 
+	/*
+	 * This is a per-zone reserve of pages that should not be
+	 * considered dirtyable memory.
+	 */
+	unsigned long		dirty_balance_reserve;
+
 #ifdef CONFIG_NUMA
 	int node;
 	/*
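
Only the field itself is added here; its consumers live in the writeback accounting code. As a hedged sketch of the stated intent, with hypothetical field names and a deliberately simplified formula rather than the kernel's actual calculation: pages the allocator must keep free are subtracted before deciding how much memory may be dirtied.

#include <stdio.h>

/* Illustrative stand-in for a zone; only the fields the sketch needs.
 * The kernel tracks these via vmstat counters, not plain fields. */
struct zone_sketch {
	unsigned long free_pages;
	unsigned long file_pages;		/* reclaimable page cache */
	unsigned long dirty_balance_reserve;	/* never counted dirtyable */
};

/* Simplified idea: exclude memory that must stay free for the
 * allocator before computing dirty limits. */
static unsigned long dirtyable_memory(const struct zone_sketch *z)
{
	unsigned long pages = z->free_pages + z->file_pages;

	return pages > z->dirty_balance_reserve ?
			pages - z->dirty_balance_reserve : 0;
}

int main(void)
{
	struct zone_sketch z = {
		.free_pages = 10000,
		.file_pages = 50000,
		.dirty_balance_reserve = 2048,
	};

	printf("dirtyable pages: %lu\n", dirtyable_memory(&z));
	return 0;
}
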
@@ -358,10 +370,8 @@ struct zone {
 	ZONE_PADDING(_pad1_)
 
 	/* Fields commonly accessed by the page reclaim scanner */
-	spinlock_t		lru_lock;	
-	struct zone_lru {
-		struct list_head list;
-	} lru[NR_LRU_LISTS];
+	spinlock_t		lru_lock;
+	struct lruvec		lruvec;
 
 	struct zone_reclaim_stat reclaim_stat;
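
With the list heads gathered into a struct lruvec, the zone now embeds one lruvec next to its lru_lock instead of an array of one-member zone_lru wrappers, so reclaim-side code indexes zone->lruvec.lists[lru] under the lock. The userspace sketch below mimics that access pattern; zone_stub, the mutex standing in for spinlock_t, and lru_add() are illustrative stand-ins, not kernel API.

#include <pthread.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

enum lru_list { LRU_INACTIVE_ANON, LRU_ACTIVE_ANON, LRU_INACTIVE_FILE,
		LRU_ACTIVE_FILE, LRU_UNEVICTABLE, NR_LRU_LISTS };

struct lruvec { struct list_head lists[NR_LRU_LISTS]; };

struct zone_stub {
	pthread_mutex_t lru_lock;	/* spinlock_t in the kernel */
	struct lruvec lruvec;
};

/* Same splice as the kernel's list_add(): insert after the head. */
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* New-style access: lock the zone, index one array of list heads.
 * The old code would have used &zone->lru[lru].list here. */
static void lru_add(struct zone_stub *zone, struct list_head *page_lru,
		    enum lru_list lru)
{
	pthread_mutex_lock(&zone->lru_lock);
	list_add(page_lru, &zone->lruvec.lists[lru]);
	pthread_mutex_unlock(&zone->lru_lock);
}

int main(void)
{
	struct zone_stub zone = { .lru_lock = PTHREAD_MUTEX_INITIALIZER };
	struct list_head page;
	enum lru_list lru;

	for (lru = 0; lru < NR_LRU_LISTS; lru++)
		zone.lruvec.lists[lru].next = zone.lruvec.lists[lru].prev =
						&zone.lruvec.lists[lru];

	lru_add(&zone, &page, LRU_ACTIVE_FILE);
	printf("active file list holds the page: %d\n",
	       zone.lruvec.lists[LRU_ACTIVE_FILE].next == &page);
	return 0;
}

One likely benefit of the new type, beyond tidiness, is that any owner of a set of LRU lists (not just a zone) can embed a struct lruvec and share code that operates on it.
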