From f5775e0b6116b7e2425ccf535243b21768566d87 Mon Sep 17 00:00:00 2001
From: David Vrabel
Date: Mon, 19 Jan 2015 11:08:05 +0000
Subject: x86/xen: discard RAM regions above the maximum reservation

During setup, discard RAM regions that are above the maximum
reservation (instead of marking them as E820_UNUSABLE).  This allows
hotplug memory to be placed at these addresses.

Signed-off-by: David Vrabel
Reviewed-by: Daniel Kiper
---
 arch/x86/xen/setup.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'arch/x86/xen')

diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 1c30e4ab1022..387b60d9bd0e 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -829,6 +829,8 @@ char * __init xen_memory_setup(void)
         addr = xen_e820_map[0].addr;
         size = xen_e820_map[0].size;
         while (i < xen_e820_map_entries) {
+                bool discard = false;
+
                 chunk_size = size;
                 type = xen_e820_map[i].type;
 
@@ -843,10 +845,11 @@ char * __init xen_memory_setup(void)
                                 xen_add_extra_mem(pfn_s, n_pfns);
                                 xen_max_p2m_pfn = pfn_s + n_pfns;
                         } else
-                                type = E820_UNUSABLE;
+                                discard = true;
                 }
 
-                xen_align_and_add_e820_region(addr, chunk_size, type);
+                if (!discard)
+                        xen_align_and_add_e820_region(addr, chunk_size, type);
 
                 addr += chunk_size;
                 size -= chunk_size;
--
cgit v1.2.3

From 81b286e0f1fe520f2a96f736ffa7e508ac9139ba Mon Sep 17 00:00:00 2001
From: David Vrabel
Date: Thu, 25 Jun 2015 13:12:46 +0100
Subject: xen/balloon: make alloc_xenballooned_pages() always allocate low pages

All users of alloc_xenballooned_pages() wanted low memory pages, so
remove the option for high memory.

Signed-off-by: David Vrabel
Reviewed-by: Daniel Kiper
---
 arch/x86/xen/grant-table.c         |  2 +-
 drivers/xen/balloon.c              | 21 ++++++++-------------
 drivers/xen/grant-table.c          |  2 +-
 drivers/xen/privcmd.c              |  2 +-
 drivers/xen/xenbus/xenbus_client.c |  3 +--
 include/xen/balloon.h              |  3 +--
 6 files changed, 13 insertions(+), 20 deletions(-)

(limited to 'arch/x86/xen')

diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index 1580e7a5a4cf..e079500b17f3 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -133,7 +133,7 @@ static int __init xlated_setup_gnttab_pages(void)
                 kfree(pages);
                 return -ENOMEM;
         }
-        rc = alloc_xenballooned_pages(nr_grant_frames, pages, 0 /* lowmem */);
+        rc = alloc_xenballooned_pages(nr_grant_frames, pages);
         if (rc) {
                 pr_warn("%s Couldn't balloon alloc %ld pfns rc:%d\n",
                         __func__, nr_grant_frames, rc);
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index ac6391bd8029..7ec933d505d2 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -136,17 +136,16 @@ static void balloon_append(struct page *page)
 }
 
 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
-static struct page *balloon_retrieve(bool prefer_highmem)
+static struct page *balloon_retrieve(bool require_lowmem)
 {
         struct page *page;
 
         if (list_empty(&ballooned_pages))
                 return NULL;
 
-        if (prefer_highmem)
-                page = list_entry(ballooned_pages.prev, struct page, lru);
-        else
-                page = list_entry(ballooned_pages.next, struct page, lru);
+        page = list_entry(ballooned_pages.next, struct page, lru);
+        if (require_lowmem && PageHighMem(page))
+                return NULL;
         list_del(&page->lru);
 
         if (PageHighMem(page))
@@ -521,24 +520,20 @@ EXPORT_SYMBOL_GPL(balloon_set_new_target);
  * alloc_xenballooned_pages - get pages that have been ballooned out
  * @nr_pages: Number of pages to get
  * @pages: pages returned
- * @highmem: allow highmem pages
  * @return 0 on success, error otherwise
  */
-int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
+int alloc_xenballooned_pages(int nr_pages, struct page **pages)
 {
         int pgno = 0;
         struct page *page;
         mutex_lock(&balloon_mutex);
         while (pgno < nr_pages) {
-                page = balloon_retrieve(highmem);
-                if (page && (highmem || !PageHighMem(page))) {
+                page = balloon_retrieve(true);
+                if (page) {
                         pages[pgno++] = page;
                 } else {
                         enum bp_state st;
-                        if (page)
-                                balloon_append(page);
-                        st = decrease_reservation(nr_pages - pgno,
-                                                  highmem ? GFP_HIGHUSER : GFP_USER);
+                        st = decrease_reservation(nr_pages - pgno, GFP_USER);
                         if (st != BP_DONE)
                                 goto out_undo;
                 }
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 62f591f8763c..a4b702c9ac68 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -687,7 +687,7 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
         int i;
         int ret;
 
-        ret = alloc_xenballooned_pages(nr_pages, pages, false);
+        ret = alloc_xenballooned_pages(nr_pages, pages);
         if (ret < 0)
                 return ret;
 
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 5e9adac928e6..b199ad3d4587 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -401,7 +401,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
         if (pages == NULL)
                 return -ENOMEM;
 
-        rc = alloc_xenballooned_pages(numpgs, pages, 0);
+        rc = alloc_xenballooned_pages(numpgs, pages);
         if (rc != 0) {
                 pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
                         numpgs, rc);
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 2ba09c1195c8..aa304d05101b 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -614,8 +614,7 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
         if (!node)
                 return -ENOMEM;
 
-        err = alloc_xenballooned_pages(nr_grefs, node->hvm.pages,
-                                       false /* lowmem */);
+        err = alloc_xenballooned_pages(nr_grefs, node->hvm.pages);
         if (err)
                 goto out_err;
 
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index c8aee7a8b8d2..83efdeb243bf 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -22,8 +22,7 @@ extern struct balloon_stats balloon_stats;
 
 void balloon_set_new_target(unsigned long target);
 
-int alloc_xenballooned_pages(int nr_pages, struct page **pages,
-                bool highmem);
+int alloc_xenballooned_pages(int nr_pages, struct page **pages);
 void free_xenballooned_pages(int nr_pages, struct page **pages);
 
 struct device;
--
cgit v1.2.3
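A minimal usage sketch of the changed interface, assuming a caller that needs a batch of ballooned-out pages; the helper names and error handling below are illustrative only and are not part of the patch. Note that alloc_xenballooned_pages() now takes just the count and the page array and always returns low-memory pages:

#include <linux/mm.h>
#include <linux/slab.h>
#include <xen/balloon.h>

/* Hypothetical helper: grab nr ballooned-out (low memory) pages. */
static int example_get_ballooned_pages(int nr, struct page ***pagesp)
{
        struct page **pages;
        int rc;

        pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        rc = alloc_xenballooned_pages(nr, pages);   /* no highmem flag any more */
        if (rc) {
                kfree(pages);
                return rc;
        }

        *pagesp = pages;
        return 0;
}

/* Hypothetical helper: give the pages back to the balloon. */
static void example_put_ballooned_pages(int nr, struct page **pages)
{
        free_xenballooned_pages(nr, pages);
        kfree(pages);
}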
From 8edfcf882eb91ec9028c7334f90f6ef3db5b0fcf Mon Sep 17 00:00:00 2001
From: David Vrabel
Date: Wed, 22 Jul 2015 14:48:09 +0100
Subject: x86/xen: export xen_alloc_p2m_entry()

Rename alloc_p2m() to xen_alloc_p2m_entry() and export it.

This is useful for ensuring that a p2m entry is allocated (i.e., not a
shared missing or identity entry) so that subsequent
set_phys_to_machine() calls will require no further allocations.

Signed-off-by: David Vrabel
Reviewed-by: Daniel Kiper
---
v3:
- Make xen_alloc_p2m_entry() a nop on auto-xlate guests.
---
 arch/x86/include/asm/xen/page.h |  2 ++
 arch/x86/xen/p2m.c              | 19 +++++++++++++------
 2 files changed, 15 insertions(+), 6 deletions(-)

(limited to 'arch/x86/xen')

diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 0679e11d2cf7..b922fa4bb4a1 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -43,6 +43,8 @@ extern unsigned long *xen_p2m_addr;
 extern unsigned long xen_p2m_size;
 extern unsigned long xen_max_p2m_pfn;
 
+extern int xen_alloc_p2m_entry(unsigned long pfn);
+
 extern unsigned long get_phys_to_machine(unsigned long pfn);
 extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 660b3cfef234..cab9f766bb06 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -530,7 +530,7 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
  * the new pages are installed with cmpxchg; if we lose the race then
  * simply free the page we allocated and use the one that's there.
  */
-static bool alloc_p2m(unsigned long pfn)
+int xen_alloc_p2m_entry(unsigned long pfn)
 {
         unsigned topidx;
         unsigned long *top_mfn_p, *mid_mfn;
@@ -540,6 +540,9 @@ static bool alloc_p2m(unsigned long pfn)
         unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
         unsigned long p2m_pfn;
 
+        if (xen_feature(XENFEAT_auto_translated_physmap))
+                return 0;
+
         ptep = lookup_address(addr, &level);
         BUG_ON(!ptep || level != PG_LEVEL_4K);
         pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));
@@ -548,7 +551,7 @@ static bool alloc_p2m(unsigned long pfn)
                 /* PMD level is missing, allocate a new one */
                 ptep = alloc_p2m_pmd(addr, pte_pg);
                 if (!ptep)
-                        return false;
+                        return -ENOMEM;
         }
 
         if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
@@ -566,7 +569,7 @@ static bool alloc_p2m(unsigned long pfn)
 
                         mid_mfn = alloc_p2m_page();
                         if (!mid_mfn)
-                                return false;
+                                return -ENOMEM;
 
                         p2m_mid_mfn_init(mid_mfn, p2m_missing);
 
@@ -592,7 +595,7 @@ static bool alloc_p2m(unsigned long pfn)
 
                 p2m = alloc_p2m_page();
                 if (!p2m)
-                        return false;
+                        return -ENOMEM;
 
                 if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))
                         p2m_init(p2m);
@@ -625,8 +628,9 @@ static bool alloc_p2m(unsigned long pfn)
                 HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
         }
 
-        return true;
+        return 0;
 }
+EXPORT_SYMBOL(xen_alloc_p2m_entry);
 
 unsigned long __init set_phys_range_identity(unsigned long pfn_s,
                         unsigned long pfn_e)
@@ -688,7 +692,10 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
         if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
-                if (!alloc_p2m(pfn))
+                int ret;
+
+                ret = xen_alloc_p2m_entry(pfn);
+                if (ret < 0)
                         return false;
 
                 return __set_phys_to_machine(pfn, mfn);
--
cgit v1.2.3
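A sketch of how the newly exported helper might be used by code that wants to guarantee later set_phys_to_machine() calls cannot fail with -ENOMEM; the wrapper name and loop bounds are illustrative only, and per the v3 note the call is a no-op for auto-translated guests:

#include <asm/xen/page.h>

/* Hypothetical: pre-allocate p2m entries for the range [pfn, pfn + nr). */
static int example_prepare_p2m_range(unsigned long pfn, unsigned long nr)
{
        unsigned long i;
        int ret;

        for (i = 0; i < nr; i++) {
                ret = xen_alloc_p2m_entry(pfn + i);
                if (ret < 0)
                        return ret;     /* typically -ENOMEM */
        }
        return 0;
}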
From a314e3eb845389b8f68130c79a63832229dea87b Mon Sep 17 00:00:00 2001
From: Stefano Stabellini
Date: Thu, 22 Oct 2015 16:20:46 +0000
Subject: xen/arm: Enable cpu_hotplug.c

Build cpu_hotplug for ARM and ARM64 guests.

Rename arch_(un)register_cpu to xen_arch_(un)register_cpu and provide an
empty implementation on ARM and ARM64.  On x86 just call
arch_(un)register_cpu as we are already doing.

Initialize cpu_hotplug on ARM.
Signed-off-by: Stefano Stabellini
Reviewed-by: Julien Grall
Reviewed-by: Boris Ostrovsky
---
 arch/arm/include/asm/xen/hypervisor.h | 10 ++++++++++
 arch/x86/include/asm/xen/hypervisor.h |  5 +++++
 arch/x86/xen/enlighten.c              | 15 +++++++++++++++
 drivers/xen/Makefile                  |  2 --
 drivers/xen/cpu_hotplug.c             |  8 ++++++--
 5 files changed, 36 insertions(+), 4 deletions(-)

(limited to 'arch/x86/xen')

diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index 04ff8e7b37df..95251512e2c4 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -26,4 +26,14 @@ void __init xen_early_init(void);
 static inline void xen_early_init(void) { return; }
 #endif
 
+#ifdef CONFIG_HOTPLUG_CPU
+static inline void xen_arch_register_cpu(int num)
+{
+}
+
+static inline void xen_arch_unregister_cpu(int num)
+{
+}
+#endif
+
 #endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index d866959e5685..8b2d4bea9962 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -57,4 +57,9 @@ static inline bool xen_x2apic_para_available(void)
 }
 #endif
 
+#ifdef CONFIG_HOTPLUG_CPU
+void xen_arch_register_cpu(int num);
+void xen_arch_unregister_cpu(int num);
+#endif
+
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 993b7a71386d..5774800ff583 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -75,6 +75,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #ifdef CONFIG_ACPI
 #include 
@@ -1899,3 +1900,17 @@ const struct hypervisor_x86 x86_hyper_xen = {
         .set_cpu_features = xen_set_cpu_features,
 };
 EXPORT_SYMBOL(x86_hyper_xen);
+
+#ifdef CONFIG_HOTPLUG_CPU
+void xen_arch_register_cpu(int num)
+{
+        arch_register_cpu(num);
+}
+EXPORT_SYMBOL(xen_arch_register_cpu);
+
+void xen_arch_unregister_cpu(int num)
+{
+        arch_unregister_cpu(num);
+}
+EXPORT_SYMBOL(xen_arch_unregister_cpu);
+#endif
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index e293bc507cbc..aa8a7f71f310 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,6 +1,4 @@
-ifeq ($(filter y, $(CONFIG_ARM) $(CONFIG_ARM64)),)
 obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
-endif
 obj-$(CONFIG_X86) += fallback.o
 obj-y += grant-table.o features.o balloon.o manage.o preempt.o
 obj-y += events/
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index cc6513a176b0..43de1f51b53f 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -11,7 +11,7 @@
 static void enable_hotplug_cpu(int cpu)
 {
         if (!cpu_present(cpu))
-                arch_register_cpu(cpu);
+                xen_arch_register_cpu(cpu);
 
         set_cpu_present(cpu, true);
 }
@@ -19,7 +19,7 @@ static void enable_hotplug_cpu(int cpu)
 static void disable_hotplug_cpu(int cpu)
 {
         if (cpu_present(cpu))
-                arch_unregister_cpu(cpu);
+                xen_arch_unregister_cpu(cpu);
 
         set_cpu_present(cpu, false);
 }
@@ -102,7 +102,11 @@ static int __init setup_vcpu_hotplug_event(void)
         static struct notifier_block xsn_cpu = {
                 .notifier_call = setup_cpu_watcher };
 
+#ifdef CONFIG_X86
         if (!xen_pv_domain())
+#else
+        if (!xen_domain())
+#endif
                 return -ENODEV;
 
         register_xenstore_notifier(&xsn_cpu);
--
cgit v1.2.3

From 914beb9fc26d6225295b8315ab54026f8f22755c Mon Sep 17 00:00:00 2001
From: David Vrabel
Date: Wed, 28 Oct 2015 13:39:05 +0000
Subject: x86/xen: add reschedule point when mapping foreign GFNs

Mapping a large range of foreign GFNs can take a long time; add a
reschedule point after each batch of 16 GFNs.

Signed-off-by: David Vrabel
Reviewed-by: Boris Ostrovsky
---
 arch/x86/xen/mmu.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/x86/xen')

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 9c479fe40459..ac161db63388 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2888,6 +2888,7 @@ static int do_remap_gfn(struct vm_area_struct *vma,
                 addr += range;
                 if (err_ptr)
                         err_ptr += batch;
+                cond_resched();
         }
 out:
 
--
cgit v1.2.3
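The general pattern this patch applies — a long-running loop in process context that yields periodically — might look like the following sketch; the batch size, function and variable names are illustrative, not taken from the patch:

#include <linux/kernel.h>
#include <linux/sched.h>

#define EXAMPLE_BATCH 16        /* illustrative batch size */

/* Hypothetical: process nr items, rescheduling between batches. */
static void example_process_many(unsigned long *items, int nr)
{
        int i;

        for (i = 0; i < nr; i++) {
                /* ... do per-item work on items[i] here ... */

                if ((i + 1) % EXAMPLE_BATCH == 0)
                        cond_resched();     /* may sleep; process context only */
        }
}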
From abed7d0710e8f892c267932a9492ccf447674fb8 Mon Sep 17 00:00:00 2001
From: Zhenzhong Duan
Date: Tue, 27 Oct 2015 15:19:52 -0400
Subject: xen: fix the check of e_pfn in xen_find_pfn_range

On some NUMA systems, after dom0 is up, we see the warning below even
if there are enough pfn ranges that could be used for remapping:

"Unable to find available pfn range, not remapping identity pages"

Fix it to avoid getting a memory region of zero size in
xen_find_pfn_range().

Signed-off-by: Zhenzhong Duan
Reviewed-by: Juergen Gross
Signed-off-by: Konrad Rzeszutek Wilk
Signed-off-by: David Vrabel
---
 arch/x86/xen/setup.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/x86/xen')

diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 387b60d9bd0e..d1fac0e53d57 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -212,7 +212,7 @@ static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
                 e_pfn = PFN_DOWN(entry->addr + entry->size);
 
                 /* We only care about E820 after this */
-                if (e_pfn < *min_pfn)
+                if (e_pfn <= *min_pfn)
                         continue;
 
                 s_pfn = PFN_UP(entry->addr);
--
cgit v1.2.3

From de0afc9bdeeadaa998797d2333c754bf9f4d5dcf Mon Sep 17 00:00:00 2001
From: Boris Ostrovsky
Date: Wed, 2 Dec 2015 12:10:48 -0500
Subject: xen: Resume PMU from non-atomic context

Resuming the PMU currently triggers a warning from ___might_sleep()
(assuming CONFIG_DEBUG_ATOMIC_SLEEP is set) when xen_pmu_init()
allocates a GFP_KERNEL page, because we are in a state resembling
atomic context.

Move PMU resume to xen_arch_resume(), which is called in regular
context.  For symmetry, move PMU suspend to xen_arch_suspend() as well.
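The rule the patch follows, as a sketch: work that may sleep (such as a GFP_KERNEL allocation in xen_pmu_init()) must run from regular process context, e.g. xen_arch_resume(), not from the suspend path that resembles atomic context. The helper below is purely illustrative and not part of the patch:

#include <linux/gfp.h>
#include <linux/kernel.h>

/* Hypothetical resume-time helper that is only legal in process context. */
static unsigned long example_resume_alloc(void)
{
        might_sleep();  /* would warn if called from the atomic suspend path */

        return __get_free_page(GFP_KERNEL);
}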
Signed-off-by: Boris Ostrovsky
Reported-by: Konrad Rzeszutek Wilk
Cc: # 4.3
Signed-off-by: David Vrabel
---
 arch/x86/xen/suspend.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

(limited to 'arch/x86/xen')

diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index feddabdab448..3705eabd7e22 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -68,26 +68,16 @@ static void xen_pv_post_suspend(int suspend_cancelled)
 
 void xen_arch_pre_suspend(void)
 {
-        int cpu;
-
-        for_each_online_cpu(cpu)
-                xen_pmu_finish(cpu);
-
         if (xen_pv_domain())
                 xen_pv_pre_suspend();
 }
 
 void xen_arch_post_suspend(int cancelled)
 {
-        int cpu;
-
         if (xen_pv_domain())
                 xen_pv_post_suspend(cancelled);
         else
                 xen_hvm_post_suspend(cancelled);
-
-        for_each_online_cpu(cpu)
-                xen_pmu_init(cpu);
 }
 
 static void xen_vcpu_notify_restore(void *data)
@@ -106,10 +96,20 @@ static void xen_vcpu_notify_suspend(void *data)
 
 void xen_arch_resume(void)
 {
+        int cpu;
+
         on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
+
+        for_each_online_cpu(cpu)
+                xen_pmu_init(cpu);
 }
 
 void xen_arch_suspend(void)
 {
+        int cpu;
+
+        for_each_online_cpu(cpu)
+                xen_pmu_finish(cpu);
+
         on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
 }
--
cgit v1.2.3

From 20f36e0380a7e871a711d5e4e59d04d4948326b4 Mon Sep 17 00:00:00 2001
From: Boris Ostrovsky
Date: Sat, 12 Dec 2015 19:25:55 -0500
Subject: xen/x86/pvh: Use HVM's flush_tlb_others op

Using MMUEXT_TLB_FLUSH_MULTI doesn't buy us much since the hypervisor
will likely perform the same IPIs as the guest would have.

More importantly, using MMUEXT_INVLPG_MULTI may not invalidate the
guest's address on a remote CPU (when, for example, a VCPU from another
guest is running there).

Signed-off-by: Boris Ostrovsky
Suggested-by: Jan Beulich
Signed-off-by: David Vrabel
---
 arch/x86/xen/mmu.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

(limited to 'arch/x86/xen')

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index ac161db63388..cb5e266a8bf7 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2495,14 +2495,9 @@ void __init xen_init_mmu_ops(void)
 {
         x86_init.paging.pagetable_init = xen_pagetable_init;
 
-        /* Optimization - we can use the HVM one but it has no idea which
-         * VCPUs are descheduled - which means that it will needlessly IPI
-         * them. Xen knows so let it do the job.
-         */
-        if (xen_feature(XENFEAT_auto_translated_physmap)) {
-                pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
+        if (xen_feature(XENFEAT_auto_translated_physmap))
                 return;
-        }
+
         pv_mmu_ops = xen_mmu_ops;
 
         memset(dummy_mapping, 0xff, PAGE_SIZE);
--
cgit v1.2.3