Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 31aa943365d8..6dfa5b24cc79 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1194,9 +1194,14 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
 	set_pageblock_migratetype(page, start_type);
 }
 
-/* Check whether there is a suitable fallback freepage with requested order. */
-static int find_suitable_fallback(struct free_area *area, unsigned int order,
-			int migratetype, bool *can_steal)
+/*
+ * Check whether there is a suitable fallback freepage with the requested
+ * order. If only_stealable is true, this function returns fallback_mt only
+ * if we can steal all the other freepages in the pageblock together. This
+ * helps reduce fragmentation from mixed-migratetype pages in one pageblock.
+ */
+int find_suitable_fallback(struct free_area *area, unsigned int order,
+			int migratetype, bool only_stealable, bool *can_steal)
 {
 	int i;
 	int fallback_mt;
@@ -1216,7 +1221,11 @@ static int find_suitable_fallback(struct free_area *area, unsigned int order,
 		if (can_steal_fallback(order, migratetype))
 			*can_steal = true;
 
-		return fallback_mt;
+		if (!only_stealable)
+			return fallback_mt;
+
+		if (*can_steal)
+			return fallback_mt;
 	}
 
 	return -1;
@@ -1238,7 +1247,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 				--current_order) {
 		area = &(zone->free_area[current_order]);
 		fallback_mt = find_suitable_fallback(area, current_order,
-				start_migratetype, &can_steal);
+				start_migratetype, false, &can_steal);
 		if (fallback_mt == -1)
 			continue;
 
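
Dropping static from find_suitable_fallback() implies its declaration moves
to a shared header (mm/internal.h in kernels of this era) so that code outside
page_alloc.c can call it. The sketch below contrasts the two modes the new
only_stealable argument enables. The second caller is illustrative only: the
compaction finish check and the COMPACT_PARTIAL return code are assumptions
based on kernels of this period, not part of this diff.

	/*
	 * Sketch only, not part of this diff. Assumes the prototype is
	 * visible via mm/internal.h:
	 *
	 *   int find_suitable_fallback(struct free_area *area,
	 *			unsigned int order, int migratetype,
	 *			bool only_stealable, bool *can_steal);
	 */

	/*
	 * Allocation fallback path (__rmqueue_fallback): accept any
	 * suitable fallback migratetype, whether or not the whole
	 * pageblock can be stolen.
	 */
	fallback_mt = find_suitable_fallback(area, current_order,
				start_migratetype, false, &can_steal);

	/*
	 * Hypothetical compaction finish check: only report success if an
	 * allocation could steal a whole pageblock, so compaction does not
	 * stop while the only fallbacks would create mixed-migratetype
	 * pageblocks.
	 */
	if (find_suitable_fallback(area, order, migratetype,
				true, &can_steal) != -1)
		return COMPACT_PARTIAL;	/* assumed compaction-era code */

With only_stealable == false the function behaves exactly as before this
patch, which is why the existing __rmqueue_fallback() caller is updated to
pass false and sees no behavioral change.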