author		Linus Torvalds <torvalds@linux-foundation.org>	2024-09-21 18:02:54 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2024-09-21 18:02:54 +0200
commit		1ec6d097897a35dfb55c4c31fc8633cf5be46497 (patch)
tree		1c86bdbc1c80e8b1f89c77969b86143df9f64886 /arch/s390/mm
parent		Merge tag 'mm-nonmm-stable-2024-09-21-07-52' of git://git.kernel.org/pub/scm/... (diff)
parent		s390/crypto: Display Query and Query Authentication Information in sysfs (diff)
Merge tag 's390-6.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Vasily Gorbik:

 - Optimize ftrace and kprobes code patching and avoid stop machine for kprobes if sequential instruction fetching facility is available

 - Add hiperdispatch feature to dynamically adjust CPU capacity in vertical polarization to improve scheduling efficiency and overall performance. Also add infrastructure for handling warning track interrupts (WTI), allowing for graceful CPU preemption

 - Rework crypto code pkey module and split it into separate, independent modules for sysfs, PCKMO, CCA, and EP11, allowing modules to load only when the relevant hardware is available

 - Add hardware acceleration for HMAC modes and the full AES-XTS cipher, utilizing message-security assist extensions (MSA) 10 and 11. It introduces new shash implementations for HMAC-SHA224/256/384/512 and registers the hardware-accelerated AES-XTS cipher as the preferred option. Also add clear key token support

 - Add MSA 10 and 11 processor activity instrumentation counters to perf and update PAI Extension 1 NNPA counters

 - Cleanup cpu sampling facility code and rework debug/WARN_ON_ONCE statements

 - Add support for SHA3 performance enhancements introduced with MSA 12

 - Add support for the query authentication information feature of MSA 13 and introduce the KDSA CPACF instruction. Provide query and query authentication information in sysfs, enabling tools like cpacfinfo to present this data in a human-readable form

 - Update kernel disassembler instructions

 - Always enable EXPOLINE_EXTERN if supported by the compiler to ensure kpatch compatibility

 - Add missing warning handling and relocated lowcore support to the early program check handler

 - Optimize ftrace_return_address() and avoid calling unwinder

 - Make modules use kernel ftrace trampolines

 - Strip relocs from the final vmlinux ELF file to make it roughly 2 times smaller

 - Dump register contents and call trace for early crashes to the console

 - Generate ptdump address marker array dynamically

 - Fix rcu_sched stalls that might occur when adding or removing large amounts of pages at once to or from the CMM balloon

 - Fix deadlock caused by recursive lock of the AP bus scan mutex

 - Unify sync and async register save areas in entry code

 - Cleanup debug prints in crypto code

 - Various cleanup and sanitizing patches for the decompressor

 - Various small ftrace cleanups

* tag 's390-6.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (84 commits)
  s390/crypto: Display Query and Query Authentication Information in sysfs
  s390/crypto: Add Support for Query Authentication Information
  s390/crypto: Rework RRE and RRF CPACF inline functions
  s390/crypto: Add KDSA CPACF Instruction
  s390/disassembler: Remove duplicate instruction format RSY_RDRU
  s390/boot: Move boot_printk() code to own file
  s390/boot: Use boot_printk() instead of sclp_early_printk()
  s390/boot: Rename decompressor_printk() to boot_printk()
  s390/boot: Compile all files with the same march flag
  s390: Use MARCH_HAS_*_FEATURES defines
  s390: Provide MARCH_HAS_*_FEATURES defines
  s390/facility: Disable compile time optimization for decompressor code
  s390/boot: Increase minimum architecture to z10
  s390/als: Remove obsolete comment
  s390/sha3: Fix SHA3 selftests failures
  s390/pkey: Add AES xts and HMAC clear key token support
  s390/cpacf: Add MSA 10 and 11 new PCKMO functions
  s390/mm: Add cond_resched() to cmm_alloc/free_pages()
  s390/pai_ext: Update PAI extension 1 counters
  s390/pai_crypto: Add support for MSA 10 and 11 pai counters
  ...
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/cmm.c	18
-rw-r--r--	arch/s390/mm/dump_pagetables.c	191
2 files changed, 94 insertions, 115 deletions
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 75d15bf41d97..d01724a715d0 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -95,11 +95,12 @@ static long cmm_alloc_pages(long nr, long *counter,
(*counter)++;
spin_unlock(&cmm_lock);
nr--;
+ cond_resched();
}
return nr;
}
-static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
+static long __cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
struct cmm_page_array *pa;
unsigned long addr;
@@ -123,6 +124,21 @@ static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
return nr;
}
+static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
+{
+ long inc = 0;
+
+ while (nr) {
+ inc = min(256L, nr);
+ nr -= inc;
+ inc = __cmm_free_pages(inc, counter, list);
+ if (inc)
+ break;
+ cond_resched();
+ }
+ return nr + inc;
+}
+
static int cmm_oom_notify(struct notifier_block *self,
unsigned long dummy, void *parm)
{
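The cmm.c change above avoids the rcu_sched stalls mentioned in the merge message by never running the free loop for more than 256 pages at a time: cmm_free_pages() becomes a wrapper that hands chunks of at most 256 pages to __cmm_free_pages() and calls cond_resched() between chunks, while cmm_alloc_pages() gets a cond_resched() in its per-page loop. Below is a minimal userspace C sketch of the same batching pattern; process_chunk() is a hypothetical stand-in for __cmm_free_pages(), and sched_yield() stands in for cond_resched().

#include <stdio.h>
#include <sched.h>

#define BATCH 256L

/*
 * Hypothetical stand-in for __cmm_free_pages(): handle up to 'n' items and
 * return how many of them could not be handled.
 */
static long process_chunk(long n)
{
	(void)n;
	return 0;	/* pretend every requested item was handled */
}

int main(void)
{
	long nr = 100000;	/* total amount of work requested */
	long inc = 0;

	while (nr) {
		inc = nr < BATCH ? nr : BATCH;	/* cap each batch, like min(256L, nr) */
		nr -= inc;
		inc = process_chunk(inc);	/* leftover items from this batch */
		if (inc)
			break;			/* stop early if a batch could not complete */
		sched_yield();			/* userspace stand-in for cond_resched() */
	}
	printf("items left unprocessed: %ld\n", nr + inc);
	return 0;
}

The point of the pattern is that no more than 256 iterations of the expensive loop ever run between scheduling points, while the return value (nr + inc) still reports how much of the requested work was left undone, exactly as the original single-loop version did.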
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 0a67fcee4414..fa54f3bc0c8d 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -18,89 +18,12 @@ static unsigned long max_addr;
struct addr_marker {
int is_start;
unsigned long start_address;
+ unsigned long size;
const char *name;
};
-enum address_markers_idx {
- KVA_NR = 0,
- LOWCORE_START_NR,
- LOWCORE_END_NR,
- AMODE31_START_NR,
- AMODE31_END_NR,
- KERNEL_START_NR,
- KERNEL_END_NR,
-#ifdef CONFIG_KFENCE
- KFENCE_START_NR,
- KFENCE_END_NR,
-#endif
- IDENTITY_START_NR,
- IDENTITY_END_NR,
- VMEMMAP_NR,
- VMEMMAP_END_NR,
- VMALLOC_NR,
- VMALLOC_END_NR,
-#ifdef CONFIG_KMSAN
- KMSAN_VMALLOC_SHADOW_START_NR,
- KMSAN_VMALLOC_SHADOW_END_NR,
- KMSAN_VMALLOC_ORIGIN_START_NR,
- KMSAN_VMALLOC_ORIGIN_END_NR,
- KMSAN_MODULES_SHADOW_START_NR,
- KMSAN_MODULES_SHADOW_END_NR,
- KMSAN_MODULES_ORIGIN_START_NR,
- KMSAN_MODULES_ORIGIN_END_NR,
-#endif
- MODULES_NR,
- MODULES_END_NR,
- ABS_LOWCORE_NR,
- ABS_LOWCORE_END_NR,
- MEMCPY_REAL_NR,
- MEMCPY_REAL_END_NR,
-#ifdef CONFIG_KASAN
- KASAN_SHADOW_START_NR,
- KASAN_SHADOW_END_NR,
-#endif
-};
-
-static struct addr_marker address_markers[] = {
- [KVA_NR] = {0, 0, "Kernel Virtual Address Space"},
- [LOWCORE_START_NR] = {1, 0, "Lowcore Start"},
- [LOWCORE_END_NR] = {0, 0, "Lowcore End"},
- [IDENTITY_START_NR] = {1, 0, "Identity Mapping Start"},
- [IDENTITY_END_NR] = {0, 0, "Identity Mapping End"},
- [AMODE31_START_NR] = {1, 0, "Amode31 Area Start"},
- [AMODE31_END_NR] = {0, 0, "Amode31 Area End"},
- [KERNEL_START_NR] = {1, (unsigned long)_stext, "Kernel Image Start"},
- [KERNEL_END_NR] = {0, (unsigned long)_end, "Kernel Image End"},
-#ifdef CONFIG_KFENCE
- [KFENCE_START_NR] = {1, 0, "KFence Pool Start"},
- [KFENCE_END_NR] = {0, 0, "KFence Pool End"},
-#endif
- [VMEMMAP_NR] = {1, 0, "vmemmap Area Start"},
- [VMEMMAP_END_NR] = {0, 0, "vmemmap Area End"},
- [VMALLOC_NR] = {1, 0, "vmalloc Area Start"},
- [VMALLOC_END_NR] = {0, 0, "vmalloc Area End"},
-#ifdef CONFIG_KMSAN
- [KMSAN_VMALLOC_SHADOW_START_NR] = {1, 0, "Kmsan vmalloc Shadow Start"},
- [KMSAN_VMALLOC_SHADOW_END_NR] = {0, 0, "Kmsan vmalloc Shadow End"},
- [KMSAN_VMALLOC_ORIGIN_START_NR] = {1, 0, "Kmsan vmalloc Origins Start"},
- [KMSAN_VMALLOC_ORIGIN_END_NR] = {0, 0, "Kmsan vmalloc Origins End"},
- [KMSAN_MODULES_SHADOW_START_NR] = {1, 0, "Kmsan Modules Shadow Start"},
- [KMSAN_MODULES_SHADOW_END_NR] = {0, 0, "Kmsan Modules Shadow End"},
- [KMSAN_MODULES_ORIGIN_START_NR] = {1, 0, "Kmsan Modules Origins Start"},
- [KMSAN_MODULES_ORIGIN_END_NR] = {0, 0, "Kmsan Modules Origins End"},
-#endif
- [MODULES_NR] = {1, 0, "Modules Area Start"},
- [MODULES_END_NR] = {0, 0, "Modules Area End"},
- [ABS_LOWCORE_NR] = {1, 0, "Lowcore Area Start"},
- [ABS_LOWCORE_END_NR] = {0, 0, "Lowcore Area End"},
- [MEMCPY_REAL_NR] = {1, 0, "Real Memory Copy Area Start"},
- [MEMCPY_REAL_END_NR] = {0, 0, "Real Memory Copy Area End"},
-#ifdef CONFIG_KASAN
- [KASAN_SHADOW_START_NR] = {1, KASAN_SHADOW_START, "Kasan Shadow Start"},
- [KASAN_SHADOW_END_NR] = {0, KASAN_SHADOW_END, "Kasan Shadow End"},
-#endif
- {1, -1UL, NULL}
-};
+static struct addr_marker *markers;
+static unsigned int markers_cnt;
struct pg_state {
struct ptdump_state ptdump;
@@ -173,7 +96,8 @@ static void note_page_update_state(struct pg_state *st, unsigned long addr, unsi
while (addr >= st->marker[1].start_address) {
st->marker++;
- pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_printf(m, "---[ %s %s ]---\n", st->marker->name,
+ st->marker->is_start ? "Start" : "End");
}
st->start_address = addr;
st->current_prot = prot;
@@ -202,7 +126,7 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
if (level == -1)
addr = max_addr;
if (st->level == -1) {
- pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_puts(m, "---[ Kernel Virtual Address Space ]---\n");
note_page_update_state(st, addr, prot, level);
} else if (prot != st->current_prot || level != st->level ||
addr >= st->marker[1].start_address) {
@@ -276,7 +200,7 @@ static int ptdump_show(struct seq_file *m, void *v)
.check_wx = false,
.wx_pages = 0,
.start_address = 0,
- .marker = address_markers,
+ .marker = markers,
};
get_online_mems();
@@ -299,10 +223,23 @@ static int ptdump_cmp(const void *a, const void *b)
if (ama->start_address < amb->start_address)
return -1;
/*
- * If the start addresses of two markers are identical consider the
- * marker which defines the start of an area higher than the one which
- * defines the end of an area. This keeps pairs of markers sorted.
+ * If the start addresses of two markers are identical sort markers in an
+ * order that considers areas contained within other areas correctly.
*/
+ if (ama->is_start && amb->is_start) {
+ if (ama->size > amb->size)
+ return -1;
+ if (ama->size < amb->size)
+ return 1;
+ return 0;
+ }
+ if (!ama->is_start && !amb->is_start) {
+ if (ama->size > amb->size)
+ return 1;
+ if (ama->size < amb->size)
+ return -1;
+ return 0;
+ }
if (ama->is_start)
return 1;
if (amb->is_start)
@@ -310,12 +247,41 @@ static int ptdump_cmp(const void *a, const void *b)
return 0;
}
+static int add_marker(unsigned long start, unsigned long end, const char *name)
+{
+ size_t oldsize, newsize;
+
+ oldsize = markers_cnt * sizeof(*markers);
+ newsize = oldsize + 2 * sizeof(*markers);
+ if (!oldsize)
+ markers = kvmalloc(newsize, GFP_KERNEL);
+ else
+ markers = kvrealloc(markers, newsize, GFP_KERNEL);
+ if (!markers)
+ goto error;
+ markers[markers_cnt].is_start = 1;
+ markers[markers_cnt].start_address = start;
+ markers[markers_cnt].size = end - start;
+ markers[markers_cnt].name = name;
+ markers_cnt++;
+ markers[markers_cnt].is_start = 0;
+ markers[markers_cnt].start_address = end;
+ markers[markers_cnt].size = end - start;
+ markers[markers_cnt].name = name;
+ markers_cnt++;
+ return 0;
+error:
+ markers_cnt = 0;
+ return -ENOMEM;
+}
+
static int pt_dump_init(void)
{
#ifdef CONFIG_KFENCE
unsigned long kfence_start = (unsigned long)__kfence_pool;
#endif
unsigned long lowcore = (unsigned long)get_lowcore();
+ int rc;
/*
* Figure out the maximum virtual address being accessible with the
@@ -324,41 +290,38 @@ static int pt_dump_init(void)
*/
max_addr = (get_lowcore()->kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
- address_markers[LOWCORE_START_NR].start_address = lowcore;
- address_markers[LOWCORE_END_NR].start_address = lowcore + sizeof(struct lowcore);
- address_markers[IDENTITY_START_NR].start_address = __identity_base;
- address_markers[IDENTITY_END_NR].start_address = __identity_base + ident_map_size;
- address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
- address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31;
- address_markers[MODULES_NR].start_address = MODULES_VADDR;
- address_markers[MODULES_END_NR].start_address = MODULES_END;
- address_markers[ABS_LOWCORE_NR].start_address = __abs_lowcore;
- address_markers[ABS_LOWCORE_END_NR].start_address = __abs_lowcore + ABS_LOWCORE_MAP_SIZE;
- address_markers[MEMCPY_REAL_NR].start_address = __memcpy_real_area;
- address_markers[MEMCPY_REAL_END_NR].start_address = __memcpy_real_area + MEMCPY_REAL_SIZE;
- address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
- address_markers[VMEMMAP_END_NR].start_address = (unsigned long)vmemmap + vmemmap_size;
- address_markers[VMALLOC_NR].start_address = VMALLOC_START;
- address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
+ /* start + end markers - must be added first */
+ rc = add_marker(0, -1UL, NULL);
+ rc |= add_marker((unsigned long)_stext, (unsigned long)_end, "Kernel Image");
+ rc |= add_marker(lowcore, lowcore + sizeof(struct lowcore), "Lowcore");
+ rc |= add_marker(__identity_base, __identity_base + ident_map_size, "Identity Mapping");
+ rc |= add_marker((unsigned long)__samode31, (unsigned long)__eamode31, "Amode31 Area");
+ rc |= add_marker(MODULES_VADDR, MODULES_END, "Modules Area");
+ rc |= add_marker(__abs_lowcore, __abs_lowcore + ABS_LOWCORE_MAP_SIZE, "Lowcore Area");
+ rc |= add_marker(__memcpy_real_area, __memcpy_real_area + MEMCPY_REAL_SIZE, "Real Memory Copy Area");
+ rc |= add_marker((unsigned long)vmemmap, (unsigned long)vmemmap + vmemmap_size, "vmemmap Area");
+ rc |= add_marker(VMALLOC_START, VMALLOC_END, "vmalloc Area");
#ifdef CONFIG_KFENCE
- address_markers[KFENCE_START_NR].start_address = kfence_start;
- address_markers[KFENCE_END_NR].start_address = kfence_start + KFENCE_POOL_SIZE;
+ rc |= add_marker(kfence_start, kfence_start + KFENCE_POOL_SIZE, "KFence Pool");
#endif
#ifdef CONFIG_KMSAN
- address_markers[KMSAN_VMALLOC_SHADOW_START_NR].start_address = KMSAN_VMALLOC_SHADOW_START;
- address_markers[KMSAN_VMALLOC_SHADOW_END_NR].start_address = KMSAN_VMALLOC_SHADOW_END;
- address_markers[KMSAN_VMALLOC_ORIGIN_START_NR].start_address = KMSAN_VMALLOC_ORIGIN_START;
- address_markers[KMSAN_VMALLOC_ORIGIN_END_NR].start_address = KMSAN_VMALLOC_ORIGIN_END;
- address_markers[KMSAN_MODULES_SHADOW_START_NR].start_address = KMSAN_MODULES_SHADOW_START;
- address_markers[KMSAN_MODULES_SHADOW_END_NR].start_address = KMSAN_MODULES_SHADOW_END;
- address_markers[KMSAN_MODULES_ORIGIN_START_NR].start_address = KMSAN_MODULES_ORIGIN_START;
- address_markers[KMSAN_MODULES_ORIGIN_END_NR].start_address = KMSAN_MODULES_ORIGIN_END;
+ rc |= add_marker(KMSAN_VMALLOC_SHADOW_START, KMSAN_VMALLOC_SHADOW_END, "Kmsan vmalloc Shadow");
+ rc |= add_marker(KMSAN_VMALLOC_ORIGIN_START, KMSAN_VMALLOC_ORIGIN_END, "Kmsan vmalloc Origins");
+ rc |= add_marker(KMSAN_MODULES_SHADOW_START, KMSAN_MODULES_SHADOW_END, "Kmsan Modules Shadow");
+ rc |= add_marker(KMSAN_MODULES_ORIGIN_START, KMSAN_MODULES_ORIGIN_END, "Kmsan Modules Origins");
+#endif
+#ifdef CONFIG_KASAN
+ rc |= add_marker(KASAN_SHADOW_START, KASAN_SHADOW_END, "Kasan Shadow");
#endif
- sort(address_markers, ARRAY_SIZE(address_markers) - 1,
- sizeof(address_markers[0]), ptdump_cmp, NULL);
+ if (rc)
+ goto error;
+ sort(&markers[1], markers_cnt - 1, sizeof(*markers), ptdump_cmp, NULL);
#ifdef CONFIG_PTDUMP_DEBUGFS
debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
#endif /* CONFIG_PTDUMP_DEBUGFS */
return 0;
+error:
+ kvfree(markers);
+ return -ENOMEM;
}
device_initcall(pt_dump_init);
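The dump_pagetables.c rework replaces the fixed address_markers[] array with a dynamically built one: add_marker() appends a start/end marker pair per region, the pair covering the whole address space is added first so it stays at the front, and everything after it is sorted with ptdump_cmp() (sort(&markers[1], markers_cnt - 1, ...)). Below is a minimal userspace C sketch of the append-and-grow pattern, assuming a simplified marker struct and plain realloc() in place of kvmalloc()/kvrealloc(); the sort step is omitted.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct addr_marker; names are illustrative. */
struct marker {
	int is_start;
	unsigned long start;
	unsigned long size;
	const char *name;
};

static struct marker *markers;
static unsigned int markers_cnt;

/*
 * Append a start/end marker pair, growing the array by two entries.
 * Returns 0 on success, -1 on allocation failure.
 */
static int add_marker(unsigned long start, unsigned long end, const char *name)
{
	size_t newsize = (markers_cnt + 2) * sizeof(*markers);
	struct marker *tmp = realloc(markers, newsize);

	if (!tmp) {
		free(markers);
		markers = NULL;
		markers_cnt = 0;
		return -1;
	}
	markers = tmp;
	markers[markers_cnt++] = (struct marker){ 1, start, end - start, name };
	markers[markers_cnt++] = (struct marker){ 0, end, end - start, name };
	return 0;
}

int main(void)
{
	unsigned int i;

	/* The whole-address-space pair is added first so it brackets everything. */
	if (add_marker(0, ~0UL, NULL) ||
	    add_marker(0x1000, 0x2000, "Example Area"))
		return 1;
	for (i = 0; i < markers_cnt; i++)
		printf("%-15s %-5s 0x%lx\n",
		       markers[i].name ? markers[i].name : "(entire space)",
		       markers[i].is_start ? "Start" : "End", markers[i].start);
	free(markers);
	return 0;
}

Compared with the old static array, this removes the address_markers_idx enum entirely and lets configuration-dependent regions (KFENCE, KMSAN, KASAN) be added only when they exist, at the cost of handling allocation failure in pt_dump_init().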