| author | Ross Lagerwall <ross.lagerwall@citrix.com> | 2016-12-09 18:10:22 +0100 |
|---|---|---|
| committer | Juergen Gross <jgross@suse.com> | 2016-12-12 15:22:22 +0100 |
| commit | 709613ad2b3c9eaeb2a3e24284b7c8feffc17326 | |
| tree | 8715afd7b68201c7180126ced3b3f4065882c8b5 /drivers/xen/balloon.c | |
| parent | xenbus: fix deadlock on writes to /proc/xen/xenbus | |
xen/balloon: Only mark a page as managed when it is released
Only mark a page as managed when it is released back to the allocator.
This ensures that the managed page count does not get falsely increased
when a VM is running. Correspondingly change it so that pages are
marked as unmanaged after getting them from the allocator.
Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Diffstat (limited to 'drivers/xen/balloon.c')
-rw-r--r-- | drivers/xen/balloon.c | 6 |
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index e4db19e88ab1..db107fa50ca1 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -180,7 +180,6 @@ static void __balloon_append(struct page *page)
 static void balloon_append(struct page *page)
 {
 	__balloon_append(page);
-	adjust_managed_page_count(page, -1);
 }
 
 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
@@ -201,8 +200,6 @@ static struct page *balloon_retrieve(bool require_lowmem)
 	else
 		balloon_stats.balloon_low--;
 
-	adjust_managed_page_count(page, 1);
-
 	return page;
 }
 
@@ -478,7 +475,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 #endif
 
 		/* Relinquish the page back to the allocator. */
-		__free_reserved_page(page);
+		free_reserved_page(page);
 	}
 
 	balloon_stats.current_pages += rc;
@@ -509,6 +506,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 			state = BP_EAGAIN;
 			break;
 		}
+		adjust_managed_page_count(page, -1);
 		scrub_page(page);
 		list_add(&page->lru, &pages);
 	}
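For context on why `free_reserved_page()` can replace the old `__free_reserved_page()` plus `adjust_managed_page_count()` pairing, the sketch below shows roughly how these helpers are defined in include/linux/mm.h around this kernel version. It is an illustration of the relationship, not a verbatim copy of the headers, so the exact bodies may differ.

```c
/* Illustrative sketch, loosely based on include/linux/mm.h circa v4.9. */

/* Free a PG_reserved page without touching the managed page count. */
static inline void __free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

/*
 * Free the page and credit it back to the zone's managed pages.
 * Because the accounting is folded in here, the balloon driver no
 * longer needs its own adjust_managed_page_count() call on release.
 */
static inline void free_reserved_page(struct page *page)
{
	__free_reserved_page(page);
	adjust_managed_page_count(page, 1);
}
```

With that in mind, the net effect of the patch is that the managed page count is only adjusted at the two points where a page actually crosses the boundary between the balloon and the page allocator: decremented right after a page is allocated into the balloon, and incremented when it is handed back via `free_reserved_page()`.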