author | David Hildenbrand <david@redhat.com> | 2019-10-01 11:01:51 +0200
---|---|---
committer | Boris Ostrovsky <boris.ostrovsky@oracle.com> | 2019-10-01 21:52:53 +0200
commit | 59b52f105f23edebf497bf8f0e57660e649e3f17 |
tree | ae32f9cd4048407235f6ee7573047fcfa05db233 /drivers/xen |
parent | xen/balloon: Drop __balloon_append() |
download | linux-59b52f105f23edebf497bf8f0e57660e649e3f17.tar.xz, linux-59b52f105f23edebf497bf8f0e57660e649e3f17.zip |
xen/balloon: Mark pages PG_offline in balloon_append()
Let's move the __SetPageOffline() call which all callers perform into
balloon_append().
In decrease_reservation(), pages are now marked PG_offline a little
later than before; however, this should not matter for Xen.
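
For illustration, a minimal sketch of what balloon_append() reads like after this patch, reconstructed from the diff below. Only the __SetPageOffline() hoist belongs to this change; the lowmem/highmem list handling is abbreviated here, and the statistics/wait-queue accounting of the real function is omitted.

```c
/* Sketch only: balloon_append() as it reads after this patch. */
static void balloon_append(struct page *page)
{
        /* Moved here by this patch; previously every caller did this itself. */
        __SetPageOffline(page);

        /* Lowmem is re-populated first, so highmem pages go at list tail. */
        if (PageHighMem(page))
                list_add_tail(&page->lru, &ballooned_pages);
        else
                list_add(&page->lru, &ballooned_pages);

        /* (The real function also updates balloon statistics; omitted here.) */
}
```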
Suggested-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Diffstat (limited to 'drivers/xen')
-rw-r--r-- | drivers/xen/balloon.c | 13 |
1 file changed, 4 insertions(+), 9 deletions(-)
```diff
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 37443c5fda99..8c245e99bb06 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -158,6 +158,8 @@ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
 /* balloon_append: add the given page to the balloon. */
 static void balloon_append(struct page *page)
 {
+        __SetPageOffline(page);
+
         /* Lowmem is re-populated first, so highmem pages go at list tail. */
         if (PageHighMem(page)) {
                 list_add_tail(&page->lru, &ballooned_pages);
@@ -372,7 +374,6 @@ static void xen_online_page(struct page *page, unsigned int order)
         for (i = 0; i < size; i++) {
                 p = pfn_to_page(start_pfn + i);
                 __online_page_set_limits(p);
-                __SetPageOffline(p);
                 balloon_append(p);
         }
         mutex_unlock(&balloon_mutex);
@@ -466,7 +467,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
                         state = BP_EAGAIN;
                         break;
                 }
-                __SetPageOffline(page);
                 adjust_managed_page_count(page, -1);
                 xenmem_reservation_scrub_page(page);
                 list_add(&page->lru, &pages);
@@ -648,10 +648,8 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
         mutex_lock(&balloon_mutex);
 
         for (i = 0; i < nr_pages; i++) {
-                if (pages[i]) {
-                        __SetPageOffline(pages[i]);
+                if (pages[i])
                         balloon_append(pages[i]);
-                }
         }
 
         balloon_stats.target_unpopulated -= nr_pages;
@@ -669,7 +667,6 @@ static void __init balloon_add_region(unsigned long start_pfn,
                                       unsigned long pages)
 {
         unsigned long pfn, extra_pfn_end;
-        struct page *page;
 
         /*
          * If the amount of usable memory has been limited (e.g., with
@@ -679,12 +676,10 @@ static void __init balloon_add_region(unsigned long start_pfn,
         extra_pfn_end = min(max_pfn, start_pfn + pages);
 
         for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
-                page = pfn_to_page(pfn);
                 /* totalram_pages and totalhigh_pages do not
                    include the boot-time balloon extension, so
                    don't subtract from it. */
-                __SetPageOffline(page);
-                balloon_append(page);
+                balloon_append(pfn_to_page(pfn));
         }
 
         balloon_stats.total_pages += extra_pfn_end - start_pfn;
```