author	Miaohe Lin <linmiaohe@huawei.com>	2022-04-29 08:40:43 +0200
committer	akpm <akpm@linux-foundation.org>	2022-05-27 18:33:44 +0200
commit	4a1c3839108afcfec02f4d62d6862b2451b442ab (patch)
tree	269f48af652f59b12388a6b1a81e58b2694d5b72 /mm
parent	mm/z3fold: put z3fold page back into unbuddied list when reclaim or migration... (diff)
mm/z3fold: always clear PAGE_CLAIMED under z3fold page lock
Think about the below race window:

CPU1                                    CPU2
z3fold_reclaim_page                     z3fold_free
 test_and_set_bit PAGE_CLAIMED
 failed to reclaim page
 z3fold_page_lock(zhdr);
 add back to the lru list;
 z3fold_page_unlock(zhdr);
                                         get_z3fold_header
                                         page_claimed=test_and_set_bit PAGE_CLAIMED

 clear_bit(PAGE_CLAIMED, &page->private);

                                         if (!page_claimed) /* it's false true */
                                          free_handle is not called

free_handle won't be called in this case, so the z3fold_buddy_slots will leak.
Fix it by always clearing PAGE_CLAIMED under the z3fold page lock.

Link: https://lkml.kernel.org/r/20220429064051.61552-8-linmiaohe@huawei.com
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Vitaly Wool <vitaly.wool@konsulko.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
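As an aside, the interleaving above can be reproduced outside the kernel. Below is a minimal user-space sketch of the same pattern; the names (page_lock, PAGE_CLAIMED_BIT, reclaim_cpu1, free_cpu2) and the sleep-based scheduling are illustrative assumptions, not the z3fold implementation. The only point it demonstrates is that clearing the "claimed" bit after dropping the page lock lets the freeing side observe a stale claimed state and skip its handle cleanup, while clearing it under the lock does not.

/*
 * Minimal user-space sketch of the race described above; build with
 *     cc -pthread -o race_sketch race_sketch.c
 * Helper names and the usleep()-based scheduling are illustrative only.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define PAGE_CLAIMED_BIT 0x1UL

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_ulong page_private;	/* stands in for page->private */
static bool handle_freed;		/* did the "free_handle" step run? */

/* test_and_set_bit()/clear_bit() stand-ins built on C11 atomics. */
static bool test_and_set_claimed(void)
{
	return atomic_fetch_or(&page_private, PAGE_CLAIMED_BIT) & PAGE_CLAIMED_BIT;
}

static void clear_claimed(void)
{
	atomic_fetch_and(&page_private, ~PAGE_CLAIMED_BIT);
}

/* CPU1: models z3fold_reclaim_page() failing and putting the page back. */
static void *reclaim_cpu1(void *arg)
{
	bool clear_under_lock = *(bool *)arg;

	test_and_set_claimed();			/* claim the page for reclaim */
	pthread_mutex_lock(&page_lock);
	usleep(100 * 1000);			/* reclaim fails, page goes back on the LRU */
	if (clear_under_lock)
		clear_claimed();		/* patched ordering: clear, then unlock */
	pthread_mutex_unlock(&page_lock);
	if (!clear_under_lock) {
		usleep(50 * 1000);		/* window in which CPU2 runs the free path */
		clear_claimed();		/* old ordering: clear after unlock */
	}
	return NULL;
}

/* CPU2: models z3fold_free() deciding whether to call free_handle(). */
static void *free_cpu2(void *arg)
{
	(void)arg;
	usleep(50 * 1000);			/* arrive while CPU1 still holds the lock */
	pthread_mutex_lock(&page_lock);		/* get_z3fold_header() takes the page lock */
	bool page_claimed = test_and_set_claimed();
	if (!page_claimed)
		handle_freed = true;		/* the free_handle() step */
	pthread_mutex_unlock(&page_lock);
	return NULL;
}

static void run(bool clear_under_lock)
{
	pthread_t t1, t2;

	atomic_store(&page_private, 0);
	handle_freed = false;
	pthread_create(&t1, NULL, reclaim_cpu1, &clear_under_lock);
	pthread_create(&t2, NULL, free_cpu2, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("%-22s: free_handle %s\n",
	       clear_under_lock ? "clear under page lock" : "clear after unlock",
	       handle_freed ? "called" : "skipped (slots would leak)");
}

int main(void)
{
	run(false);	/* old ordering: the handle leaks */
	run(true);	/* patched ordering: the handle is freed */
	return 0;
}

With the old ordering the freeing thread takes the lock, finds the claimed bit still set and skips its cleanup, mirroring the leaked z3fold_buddy_slots; with the patched ordering the clear happens before the unlock, so anyone who subsequently takes the lock sees the bit already cleared.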
Diffstat (limited to 'mm')
-rw-r--r--	mm/z3fold.c	6
1 file changed, 3 insertions, 3 deletions
diff --git a/mm/z3fold.c b/mm/z3fold.c
index a1c150fc8def..4a3cd2ff15b0 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -1221,8 +1221,8 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
 		return;
 	}
 	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
-		put_z3fold_header(zhdr);
 		clear_bit(PAGE_CLAIMED, &page->private);
+		put_z3fold_header(zhdr);
 		return;
 	}
 	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
@@ -1424,8 +1424,8 @@ next:
 			spin_unlock(&pool->lock);
 			if (list_empty(&zhdr->buddy))
 				add_to_unbuddied(pool, zhdr);
-			z3fold_page_unlock(zhdr);
 			clear_bit(PAGE_CLAIMED, &page->private);
+			z3fold_page_unlock(zhdr);
 		}
 
 		/* We started off locked to we need to lock the pool back */
@@ -1577,8 +1577,8 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
 	if (!z3fold_page_trylock(zhdr))
 		return -EAGAIN;
 	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
-		z3fold_page_unlock(zhdr);
 		clear_bit(PAGE_CLAIMED, &page->private);
+		z3fold_page_unlock(zhdr);
 		return -EBUSY;
 	}
 	if (work_pending(&zhdr->work)) {