author	Yu Zhao <yuzhao@google.com>	2021-07-01 03:49:48 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-07-01 05:47:28 +0200
commit	2d2b8d2b67713da5de333a8849342503a9f21c60 (patch)
tree	927354b2ae89b6814e6abb59de6f5f0b27945c97 /mm/vmscan.c
parent	userfaultfd/selftests: exercise minor fault handling shmem support (diff)
mm/vmscan.c: fix potential deadlock in reclaim_pages()
Theoretically, without the protection of memalloc_noreclaim_save() and
memalloc_noreclaim_restore(), reclaim_pages() can go into the block I/O
layer recursively and deadlock.

Querying 'reclaim_pages' in our kernel crash databases didn't yield any
results, so the deadlock seems unlikely to happen.  A possible
explanation is that the only user of reclaim_pages(), i.e.,
MADV_PAGEOUT, is usually called before memory pressure builds up, e.g.,
on Android and Chrome OS.  Under such a condition, allocations in the
block I/O layer can be fulfilled without diverting to direct reclaim,
and the recursion is therefore avoided.

Link: https://lkml.kernel.org/r/20210622074642.785473-1-yuzhao@google.com
Link: https://lkml.kernel.org/r/20210614194727.2684053-1-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
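[Editor's note: for readers unfamiliar with the scoped-reclaim helpers
the fix uses, here is a minimal sketch of their behavior, paraphrased
and simplified from include/linux/sched/mm.h; it is not part of this
diff. They save the task's current PF_MEMALLOC state, set the flag, and
later restore the saved state:

	#include <linux/sched.h>

	/*
	 * PF_MEMALLOC marks the current task as being inside memory
	 * reclaim, so its own allocations must not recurse into direct
	 * reclaim (and may dip into memory reserves).
	 */
	static inline unsigned int memalloc_noreclaim_save(void)
	{
		unsigned int flags = current->flags & PF_MEMALLOC;

		current->flags |= PF_MEMALLOC;
		return flags;
	}

	static inline void memalloc_noreclaim_restore(unsigned int flags)
	{
		current->flags = (current->flags & ~PF_MEMALLOC) | flags;
	}

Because the helpers save and restore the previous PF_MEMALLOC state
rather than blindly setting and clearing the flag, the pairing nests
safely under callers that already hold it.]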
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d7c3cb8688dd..7b52ab166aae 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1701,6 +1701,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 	unsigned int nr_reclaimed;
 	struct page *page, *next;
 	LIST_HEAD(clean_pages);
+	unsigned int noreclaim_flag;
 
 	list_for_each_entry_safe(page, next, page_list, lru) {
 		if (!PageHuge(page) && page_is_file_lru(page) &&
@@ -1711,8 +1712,17 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 		}
 	}
 
+	/*
+	 * We should be safe here since we are only dealing with file pages and
+	 * we are not kswapd and therefore cannot write dirty file pages. But
+	 * call memalloc_noreclaim_save() anyway, just in case these conditions
+	 * change in the future.
+	 */
+	noreclaim_flag = memalloc_noreclaim_save();
 	nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
 					&stat, true);
+	memalloc_noreclaim_restore(noreclaim_flag);
+
 	list_splice(&clean_pages, page_list);
 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
 			    -(long)nr_reclaimed);
@@ -2306,6 +2316,7 @@ unsigned long reclaim_pages(struct list_head *page_list)
 	LIST_HEAD(node_page_list);
 	struct reclaim_stat dummy_stat;
 	struct page *page;
+	unsigned int noreclaim_flag;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.priority = DEF_PRIORITY,
@@ -2314,6 +2325,8 @@ unsigned long reclaim_pages(struct list_head *page_list)
 		.may_swap = 1,
 	};
 
+	noreclaim_flag = memalloc_noreclaim_save();
+
 	while (!list_empty(page_list)) {
 		page = lru_to_page(page_list);
 		if (nid == NUMA_NO_NODE) {
@@ -2350,6 +2363,8 @@ unsigned long reclaim_pages(struct list_head *page_list)
 		}
 	}
 
+	memalloc_noreclaim_restore(noreclaim_flag);
+
 	return nr_reclaimed;
 }
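
[Editor's note: for context on why PF_MEMALLOC breaks the recursion,
the page allocator's slow path refuses to enter direct reclaim for a
task that already has the flag set, so allocations made from inside
reclaim (e.g., in the block I/O layer) fall through to other fallbacks
instead of re-entering reclaim. Roughly, paraphrased from
__alloc_pages_slowpath() in mm/page_alloc.c, not part of this diff:

	/* Avoid recursion of direct reclaim */
	if (current->flags & PF_MEMALLOC)
		goto nopage;

Without PF_MEMALLOC set, a GFP_KERNEL allocation issued while
reclaim_pages() is writing pages out could miss this check, re-enter
reclaim, and deadlock on locks already held further up the stack.]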