Diffstat (limited to 'mm/z3fold.c')
-rw-r--r-- | mm/z3fold.c | 29
1 file changed, 19 insertions, 10 deletions
diff --git a/mm/z3fold.c b/mm/z3fold.c
index dfcd69d08c1e..6c72b18d8b9c 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -101,6 +101,7 @@ struct z3fold_buddy_slots {
  * @refcount: reference count for the z3fold page
  * @work: work_struct for page layout optimization
  * @slots: pointer to the structure holding buddy slots
+ * @pool: pointer to the containing pool
  * @cpu: CPU which this page "belongs" to
  * @first_chunks: the size of the first buddy in chunks, 0 if free
  * @middle_chunks: the size of the middle buddy in chunks, 0 if free
@@ -114,6 +115,7 @@ struct z3fold_header {
 	struct kref refcount;
 	struct work_struct work;
 	struct z3fold_buddy_slots *slots;
+	struct z3fold_pool *pool;
 	short cpu;
 	unsigned short first_chunks;
 	unsigned short middle_chunks;
@@ -193,8 +195,10 @@ static void compact_page_work(struct work_struct *w);
 static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
 							gfp_t gfp)
 {
-	struct z3fold_buddy_slots *slots = kmem_cache_alloc(pool->c_handle,
-							    gfp);
+	struct z3fold_buddy_slots *slots;
+
+	slots = kmem_cache_alloc(pool->c_handle,
+				 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));
 
 	if (slots) {
 		memset(slots->slot, 0, sizeof(slots->slot));
@@ -320,6 +324,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
 	zhdr->start_middle = 0;
 	zhdr->cpu = -1;
 	zhdr->slots = slots;
+	zhdr->pool = pool;
 	INIT_LIST_HEAD(&zhdr->buddy);
 	INIT_WORK(&zhdr->work, compact_page_work);
 	return zhdr;
@@ -426,7 +431,7 @@ static enum buddy handle_to_buddy(unsigned long handle)
 
 static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
 {
-	return slots_to_pool(zhdr->slots);
+	return zhdr->pool;
 }
 
 static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
@@ -850,7 +855,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
 	enum buddy bud;
 	bool can_sleep = gfpflags_allow_blocking(gfp);
 
-	if (!size || (gfp & __GFP_HIGHMEM))
+	if (!size)
 		return -EINVAL;
 
 	if (size > PAGE_SIZE)
@@ -1345,24 +1350,29 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
 	zhdr = page_address(page);
 	pool = zhdr_to_pool(zhdr);
 
-	if (!trylock_page(page))
-		return -EAGAIN;
-
 	if (!z3fold_page_trylock(zhdr)) {
-		unlock_page(page);
 		return -EAGAIN;
 	}
 	if (zhdr->mapped_count != 0) {
 		z3fold_page_unlock(zhdr);
-		unlock_page(page);
 		return -EBUSY;
 	}
+	if (work_pending(&zhdr->work)) {
+		z3fold_page_unlock(zhdr);
+		return -EAGAIN;
+	}
 	new_zhdr = page_address(newpage);
 	memcpy(new_zhdr, zhdr, PAGE_SIZE);
 	newpage->private = page->private;
 	page->private = 0;
 	z3fold_page_unlock(zhdr);
 	spin_lock_init(&new_zhdr->page_lock);
+	INIT_WORK(&new_zhdr->work, compact_page_work);
+	/*
+	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
+	 * so we only have to reinitialize it.
+	 */
+	INIT_LIST_HEAD(&new_zhdr->buddy);
 	new_mapping = page_mapping(page);
 	__ClearPageMovable(page);
 	ClearPagePrivate(page);
@@ -1386,7 +1396,6 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
 	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
 
 	page_mapcount_reset(page);
-	unlock_page(page);
 	put_page(page);
 	return 0;
 }
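The structural core of the patch is the new pool back-pointer in struct z3fold_header: zhdr_to_pool() becomes a plain field read instead of a lookup through the buddy slots. The standalone C sketch below illustrates that pattern only; the struct layouts, the separate pool field in the slots struct, and the main() harness are simplified stand-ins for illustration, not the kernel definitions.

/*
 * Minimal userspace sketch (not kernel code) of the back-pointer pattern
 * introduced above: zhdr_to_pool() reads a field directly instead of
 * recovering the pool through the buddy slots.
 */
#include <stdint.h>
#include <stdio.h>

struct z3fold_pool {
	const char *name;
};

struct z3fold_buddy_slots {
	unsigned long slot[4];
	uintptr_t pool;			/* pool address kept in the slots (old path) */
};

struct z3fold_header {
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;	/* direct back-pointer (new path) */
};

/* Old path: recover the pool indirectly from the slots structure. */
static struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)s->pool;
}

/* New path: a plain dereference that does not rely on the slots object. */
static struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

int main(void)
{
	struct z3fold_pool pool = { .name = "demo-pool" };
	struct z3fold_buddy_slots slots = { .pool = (uintptr_t)&pool };
	struct z3fold_header zhdr = { .slots = &slots, .pool = &pool };

	printf("via slots:        %s\n", slots_to_pool(zhdr.slots)->name);
	printf("via back-pointer: %s\n", zhdr_to_pool(&zhdr)->name);
	return 0;
}

With the back-pointer stored at page init time, pool lookup on the paths touched by this diff no longer goes through zhdr->slots, and z3fold_alloc() can accept callers' gfp flags while alloc_slots() masks off __GFP_HIGHMEM and __GFP_MOVABLE before the slab allocation, as the hunks above show.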