path: root/mm/memory_hotplug.c
author    Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>  2023-08-08 11:14:58 +0200
committer Andrew Morton <akpm@linux-foundation.org>      2023-08-21 22:37:48 +0200
commit    85a2b4b08f202d67be81e2453064e01572ec19c8 (patch)
tree      52a211da068b0092b026a54cfba4f15f5f29dd6e /mm/memory_hotplug.c
parent    mm/memory_hotplug: allow memmap on memory hotplug request to fallback (diff)
download  linux-85a2b4b08f202d67be81e2453064e01572ec19c8.tar.xz
          linux-85a2b4b08f202d67be81e2453064e01572ec19c8.zip
mm/memory_hotplug: allow architecture to override memmap on memory support check
Some architectures would want different restrictions. Hence add an architecture-specific override. The PMD_SIZE check is moved there.

Link: https://lkml.kernel.org/r/20230808091501.287660-4-aneesh.kumar@linux.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
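An architecture opts out of the generic rule by providing its own arch_supports_memmap_on_memory() and defining a macro of the same name in one of its headers, so the #ifndef fallback added in the diff below is compiled out. A minimal sketch of what such an override could look like; the header path, the chosen condition and the comment are illustrative assumptions, not taken from this patch:

/* arch/<arch>/include/asm/pgtable.h -- illustrative sketch, not from this series */
static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
{
	/*
	 * Hypothetical policy: accept any page-aligned vmemmap instead of
	 * requiring PMD alignment; a real architecture would encode its
	 * own mapping-granule rule here.
	 */
	return IS_ALIGNED(vmemmap_size, PAGE_SIZE);
}
#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory

For the generic default below: with a 128 MiB memory block, 4 KiB base pages and a 64-byte struct page, memory_block_memmap_size() evaluates to (128 MiB / 4 KiB) * 64 B = 2 MiB, which matches PMD_SIZE on x86-64, so the PMD alignment check passes there.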
Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r--   mm/memory_hotplug.c   24
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index eca32ccd45cc..746cb7c08c64 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1247,10 +1247,26 @@ static int online_memory_block(struct memory_block *mem, void *arg)
 	return device_online(&mem->dev);
 }
 
+static inline unsigned long memory_block_memmap_size(void)
+{
+	return PHYS_PFN(memory_block_size_bytes()) * sizeof(struct page);
+}
+
+#ifndef arch_supports_memmap_on_memory
+static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
+{
+	/*
+	 * As default, we want the vmemmap to span a complete PMD such that we
+	 * can map the vmemmap using a single PMD if supported by the
+	 * architecture.
+	 */
+	return IS_ALIGNED(vmemmap_size, PMD_SIZE);
+}
+#endif
+
 static bool mhp_supports_memmap_on_memory(unsigned long size)
 {
-	unsigned long nr_vmemmap_pages = size / PAGE_SIZE;
-	unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);
+	unsigned long vmemmap_size = memory_block_memmap_size();
 	unsigned long remaining_size = size - vmemmap_size;
 
 	/*
@@ -1281,8 +1297,8 @@ static bool mhp_supports_memmap_on_memory(unsigned long size)
 	 */
 	return mhp_memmap_on_memory() &&
 	       size == memory_block_size_bytes() &&
-	       IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
-	       IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT));
+	       IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT)) &&
+	       arch_supports_memmap_on_memory(vmemmap_size);
 }
 
 /*