author    Heiko Carstens <hca@linux.ibm.com>  2022-10-31 14:11:57 +0100
committer Alexander Gordeev <agordeev@linux.ibm.com>  2022-11-23 16:22:21 +0100
commit    254b2fd02e34a5761cd2a3aad8b24a7ddd8962e1 (patch)
tree      542937306fd49187b34496ef71f76246f046292d /arch/s390/mm/init.c
parent    s390: use generic vga.h header file (diff)
s390/mm: provide minimal setup_per_cpu_areas() implementation
s390 allows enabling CONFIG_NUMA, mainly to provide a couple of system calls which are only present if NUMA is enabled. These NUMA-specific system calls are required by a couple of applications, which would not work if the system calls were missing.

The NUMA implementation itself maps all CPUs and memory to node 0. A special case is the generic percpu setup code, which does not expect an implementation like s390's and therefore emits the message/warning "percpu: cpu 0 has no node -1 or node-local memory".

In order to get rid of this message, and also to provide sane CPU-to-node and CPU distance mappings, implement a minimal setup_per_cpu_areas() function, which is very close to the generic variant.

Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
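For context, here is a minimal sketch (not part of the commit) of how per-CPU data is consumed once setup_per_cpu_areas() has populated __per_cpu_offset. The module and the demo_counter variable are made up for illustration; the accessors (DEFINE_PER_CPU, this_cpu_inc, per_cpu, cpu_to_node) are the standard kernel interfaces that resolve a CPU's copy through the per-CPU offsets set up by this patch.

/*
 * Hypothetical illustration only: a tiny kernel module that touches a
 * per-CPU variable. per_cpu() and this_cpu_inc() locate each CPU's copy
 * via __per_cpu_offset[cpu], which the new setup_per_cpu_areas() fills
 * in from pcpu_unit_offsets[]. With the minimal s390 implementation,
 * cpu_to_node() reports node 0 for every CPU, matching the
 * pcpu_cpu_to_node() callback added by this patch.
 */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/topology.h>

static DEFINE_PER_CPU(unsigned long, demo_counter);

static int __init demo_init(void)
{
	unsigned int cpu;

	/* Bump the copy belonging to the CPU we are currently running on. */
	this_cpu_inc(demo_counter);

	for_each_possible_cpu(cpu)
		pr_info("cpu %u (node %d): demo_counter=%lu\n",
			cpu, cpu_to_node(cpu), per_cpu(demo_counter, cpu));
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");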
Diffstat (limited to 'arch/s390/mm/init.c')
-rw-r--r--  arch/s390/mm/init.c | 36
1 file changed, 36 insertions(+), 0 deletions(-)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 97d66a3e60fb..a28832eefb06 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -31,6 +31,7 @@
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
+#include <linux/percpu.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
@@ -222,6 +223,41 @@ unsigned long memory_block_size_bytes(void)
return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+ return LOCAL_DISTANCE;
+}
+
+static int __init pcpu_cpu_to_node(int cpu)
+{
+ return 0;
+}
+
+void __init setup_per_cpu_areas(void)
+{
+ unsigned long delta;
+ unsigned int cpu;
+ int rc;
+
+ /*
+ * Always reserve area for module percpu variables. That's
+ * what the legacy allocator did.
+ */
+ rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+ PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
+ pcpu_cpu_distance,
+ pcpu_cpu_to_node);
+ if (rc < 0)
+ panic("Failed to initialize percpu areas.");
+
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu)
+ __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+
#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_CMA