author     Weijie Yang <weijie.yang@samsung.com>            2015-02-10 23:11:39 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-02-10 23:30:34 +0100
commit     4c5018ce06c6be292d4ed96cecf2c8dda361a923 (patch)
tree       0e695e86f11df493ad7fcf73daf7fc0fb8ce6c3f /mm/page_alloc.c
parent     mm: hugetlb: fix type of hugetlb_treat_as_movable variable (diff)
download   linux-4c5018ce06c6be292d4ed96cecf2c8dda361a923.tar.xz
           linux-4c5018ce06c6be292d4ed96cecf2c8dda361a923.zip
mm/page_alloc.c: place zone_id check before VM_BUG_ON_PAGE check
If the freeing page and its buddy page are not in the same zone, the zone->lock currently held for the freeing page cannot prevent the buddy page from being allocated. This can trigger the VM_BUG_ON_PAGE in page_is_buddy(), although the window is very small, for example:

cpu 0:                                        cpu 1:
hold zone_1 lock
check page and its buddy
PageBuddy(buddy) is true                      hold zone_2 lock
page_order(buddy) == order is true            alloc buddy
trigger VM_BUG_ON_PAGE(page_count(buddy) != 0)

zone_1->lock prevents the freeing page from being allocated,
zone_2->lock prevents the buddy page from being allocated,
but they are not the same zone->lock.

If we can't remove the zone_id check, it is better to handle this rare race.

This patch fixes this by placing the zone_id check before the VM_BUG_ON_PAGE check.

Signed-off-by: Weijie Yang <weijie.yang@samsung.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
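To see why the zone->lock taken on the freeing side is not enough on its own, it helps to recall what the free path of that era roughly looks like. The sketch below is a simplified reconstruction (the NR_PAGES_SCANNED and pageblock-isolation bookkeeping are omitted), not a verbatim copy of the tree:

static void free_one_page(struct zone *zone, struct page *page,
			  unsigned long pfn, unsigned int order,
			  int migratetype)
{
	/* This lock covers only the zone of the page being freed ... */
	spin_lock(&zone->lock);
	/*
	 * ... so a buddy that belongs to another zone can still be
	 * handed out under that other zone's lock while page_is_buddy()
	 * is inspecting it, which is the race described above.
	 */
	__free_one_page(page, pfn, zone, order, migratetype);
	spin_unlock(&zone->lock);
}

Hence page_is_buddy() has to confirm that the buddy lives in the same zone before it asserts page_count(buddy) == 0.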
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--   mm/page_alloc.c   8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8e20f9c2fa5a..f121050e8530 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -552,17 +552,15 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
 		return 0;
 
 	if (page_is_guard(buddy) && page_order(buddy) == order) {
-		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
-
 		if (page_zone_id(page) != page_zone_id(buddy))
 			return 0;
 
+		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
+
 		return 1;
 	}
 
 	if (PageBuddy(buddy) && page_order(buddy) == order) {
-		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
-
 		/*
 		 * zone check is done late to avoid uselessly
 		 * calculating zone/node ids for pages that could
@@ -571,6 +569,8 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
 		if (page_zone_id(page) != page_zone_id(buddy))
 			return 0;
 
+		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
+
 		return 1;
 	}
 
 	return 0;
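For readability, this is roughly how page_is_buddy() reads once the patch is applied. The function head, the pfn_valid_within() check and the tail of the "zone check is done late" comment are not part of the hunks above; they are reconstructed from the surrounding kernel source of that period, and the extra comments on the reordered checks are editorial annotations summarizing the race from the commit message, so treat this as an illustrative sketch rather than a verbatim copy of the tree:

static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_is_guard(buddy) && page_order(buddy) == order) {
		/*
		 * The buddy may sit in a different zone whose zone->lock
		 * we do not hold, so check the zone id before asserting
		 * anything about the buddy's refcount.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		/* Same zone is now guaranteed, so our zone->lock pins the buddy. */
		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}

	return 0;
}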