author    Mel Gorman <mgorman@techsingularity.net>  2022-11-22 14:12:29 +0100
committer Andrew Morton <akpm@linux-foundation.org>  2022-12-01 00:59:01 +0100
commit    a4bafffb5dc5be6c7a3b77b2de0cbaf6776a3c8b (patch)
tree      04cf1f17fdbbe496d109fbeb331f5a12ef88e17b /mm
parent    mm/page_alloc: leave IRQs enabled for per-cpu page allocations (diff)
mm/page_alloc: simplify locking during free_unref_page_list
While freeing a large list, the zone lock will be released and reacquired to avoid long hold times since commit c24ad77d962c ("mm/page_alloc.c: avoid excessive IRQ disabled times in free_unref_page_list()"). As suggested by Vlastimil Babka, the lock release/reacquire logic can be simplified by reusing the logic that acquires a different lock when changing zones.

Link: https://lkml.kernel.org/r/20221122131229.5263-3-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
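The pattern the patch converges on can be sketched outside the kernel: the periodic lock release that previously lived in its own block at the bottom of the loop is folded into the branch that already re-takes the lock when the zone changes. Below is a minimal userspace sketch of that pattern, not the kernel code itself; the names struct item, item->zone, free_one() and BATCH (standing in for SWAP_CLUSTER_MAX) are illustrative, and pthread mutexes stand in for the pcp/zone locking.

#include <pthread.h>
#include <stddef.h>

#define BATCH 32	/* stand-in for SWAP_CLUSTER_MAX */

struct zone { pthread_mutex_t lock; };
struct item { struct item *next; struct zone *zone; };

static void free_one(struct item *it) { (void)it; /* real work elided */ }

void free_item_list(struct item *list)
{
	struct zone *locked_zone = NULL;
	int batch_count = 0;
	struct item *it, *next;

	for (it = list; it; it = next) {
		next = it->next;

		/*
		 * Either a different zone needs a different lock, or the
		 * current lock has been held for BATCH items and should be
		 * dropped briefly to bound hold time. One branch covers
		 * both cases; no separate unlock at the bottom of the loop.
		 */
		if (it->zone != locked_zone || batch_count == BATCH) {
			if (locked_zone)
				pthread_mutex_unlock(&locked_zone->lock);
			batch_count = 0;
			locked_zone = it->zone;
			pthread_mutex_lock(&locked_zone->lock);
		}

		free_one(it);
		batch_count++;
	}

	if (locked_zone)
		pthread_mutex_unlock(&locked_zone->lock);
}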
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  25
1 file changed, 9 insertions, 16 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d9d83254c485..5ab9dd29ef7e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3525,13 +3525,19 @@ void free_unref_page_list(struct list_head *list)
 		list_del(&page->lru);
 		migratetype = get_pcppage_migratetype(page);
 
-		/* Different zone, different pcp lock. */
-		if (zone != locked_zone) {
+		/*
+		 * Either different zone requiring a different pcp lock or
+		 * excessive lock hold times when freeing a large list of
+		 * pages.
+		 */
+		if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) {
 			if (pcp) {
 				pcp_spin_unlock(pcp);
 				pcp_trylock_finish(UP_flags);
 			}
 
+			batch_count = 0;
+
 			/*
 			 * trylock is necessary as pages may be getting freed
 			 * from IRQ or SoftIRQ context after an IO completion.
@@ -3546,7 +3552,6 @@ void free_unref_page_list(struct list_head *list)
 				continue;
 			}
 			locked_zone = zone;
-			batch_count = 0;
 		}
 
 		/*
@@ -3558,19 +3563,7 @@ void free_unref_page_list(struct list_head *list)
 
 		trace_mm_page_free_batched(page);
 		free_unref_page_commit(zone, pcp, page, migratetype, 0);
-
-		/*
-		 * Guard against excessive lock hold times when freeing
-		 * a large list of pages. Lock will be reacquired if
-		 * necessary on the next iteration.
-		 */
-		if (++batch_count == SWAP_CLUSTER_MAX) {
-			pcp_spin_unlock(pcp);
-			pcp_trylock_finish(UP_flags);
-			batch_count = 0;
-			pcp = NULL;
-			locked_zone = NULL;
-		}
+		batch_count++;
 	}
 
 	if (pcp) {