author | Li RongQing <lirongqing@baidu.com> | 2024-05-20 14:08:58 +0200
committer | Sean Christopherson <seanjc@google.com> | 2024-06-04 01:14:11 +0200
commit | 99a49093ce92d6116a1635802f9c16d0db6e805a (patch)
tree | 9efb8da910363a8ec40e7ab9bece1eb6cf023571
parent | KVM: SVM: not account memory allocation for per-CPU svm_data (diff)
KVM: SVM: Consider NUMA affinity when allocating per-CPU save_area
The save_area pages of the per-CPU svm_data are accessed predominantly from
their own local CPUs, so allocate them node-local for better performance.

To that end, rename __snp_safe_alloc_page() to snp_safe_alloc_page_node(),
which takes a NUMA node id as an input parameter, and have svm_cpu_init()
call it with the node id derived from the cpu id via cpu_to_node().
Signed-off-by: Li RongQing <lirongqing@baidu.com>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lore.kernel.org/r/20240520120858.13117-4-lirongqing@baidu.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
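[Editor's note: a minimal sketch of the node-local per-CPU allocation pattern the message describes, not the actual KVM code. Each CPU's backing page is taken from that CPU's own NUMA node via cpu_to_node() and alloc_pages_node(), rather than from whichever node the initializing CPU happens to run on. The names example_cpu_buf and example_cpu_init() are hypothetical.]

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/topology.h>

static DEFINE_PER_CPU(struct page *, example_cpu_buf);

/* Allocate a zeroed, order-0 page on the NUMA node that owns @cpu. */
static int example_cpu_init(int cpu)
{
	struct page *p = alloc_pages_node(cpu_to_node(cpu),
					  GFP_KERNEL | __GFP_ZERO, 0);

	if (!p)
		return -ENOMEM;

	per_cpu(example_cpu_buf, cpu) = p;
	return 0;
}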
-rw-r--r-- | arch/x86/kvm/svm/sev.c | 6
-rw-r--r-- | arch/x86/kvm/svm/svm.c | 2
-rw-r--r-- | arch/x86/kvm/svm/svm.h | 10
3 files changed, 9 insertions, 9 deletions
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 4d534788bfa3..1c55159a2159 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3380,13 +3380,13 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 	}
 }
 
-struct page *__snp_safe_alloc_page(gfp_t gfp)
+struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
 {
 	unsigned long pfn;
 	struct page *p;
 
 	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
-		return alloc_page(gfp | __GFP_ZERO);
+		return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
 
 	/*
 	 * Allocate an SNP-safe page to workaround the SNP erratum where
@@ -3397,7 +3397,7 @@ struct page *__snp_safe_alloc_page(gfp_t gfp)
 	 * Allocate one extra page, choose a page which is not
 	 * 2MB-aligned, and free the other.
 	 */
-	p = alloc_pages(gfp | __GFP_ZERO, 1);
+	p = alloc_pages_node(node, gfp | __GFP_ZERO, 1);
 	if (!p)
 		return NULL;
 
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index adbd676708f6..da5cddec97a6 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -703,7 +703,7 @@ static int svm_cpu_init(int cpu)
 	int ret = -ENOMEM;
 
 	memset(sd, 0, sizeof(struct svm_cpu_data));
-	sd->save_area = __snp_safe_alloc_page(GFP_KERNEL);
+	sd->save_area = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
 	if (!sd->save_area)
 		return ret;
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index e0a12582ae78..8983eabf8f84 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -694,11 +694,11 @@ void sev_guest_memory_reclaimed(struct kvm *kvm);
 int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
 
 /* These symbols are used in common code and are stubbed below. */
-struct page *__snp_safe_alloc_page(gfp_t gfp);
+struct page *snp_safe_alloc_page_node(int node, gfp_t gfp);
 
 static inline struct page *snp_safe_alloc_page(void)
 {
-	return __snp_safe_alloc_page(GFP_KERNEL_ACCOUNT);
+	return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
 }
 
 void sev_free_vcpu(struct kvm_vcpu *vcpu);
@@ -710,14 +710,14 @@ int sev_cpu_init(struct svm_cpu_data *sd);
 int sev_dev_get_attr(u32 group, u64 attr, u64 *val);
 extern unsigned int max_sev_asid;
 #else
-static inline struct page *__snp_safe_alloc_page(gfp_t gfp)
+static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
 {
-	return alloc_page(gfp | __GFP_ZERO);
+	return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
 }
 
 static inline struct page *snp_safe_alloc_page(void)
 {
-	return __snp_safe_alloc_page(GFP_KERNEL_ACCOUNT);
+	return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
 }
 
 static inline void sev_free_vcpu(struct kvm_vcpu *vcpu) {}
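[Editor's note: the first sev.c hunk ends before the code that implements the comment's scheme ("allocate one extra page, choose a page which is not 2MB-aligned, and free the other"). The sketch below shows one way that scheme can be realised with the order-1, node-aware allocation; it is an illustration, not necessarily the exact upstream body of snp_safe_alloc_page_node(), and the helper name is hypothetical.]

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pgtable.h>

/* Return a zeroed page from @node that is guaranteed not to be 2MB-aligned. */
static struct page *example_alloc_non_2mb_aligned(int node, gfp_t gfp)
{
	unsigned long pfn;
	struct page *p;

	/* Over-allocate: an order-1 block always contains one safe page. */
	p = alloc_pages_node(node, gfp | __GFP_ZERO, 1);
	if (!p)
		return NULL;

	/* Split the order-1 block so each page can be freed on its own. */
	split_page(p, 1);

	pfn = page_to_pfn(p);
	if (IS_ALIGNED(pfn, PTRS_PER_PMD))
		__free_page(p++);	/* first page is 2MB-aligned: keep the second */
	else
		__free_page(p + 1);	/* first page is already safe: free the second */

	return p;
}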