author		Heiko Carstens <hca@linux.ibm.com>	2022-11-30 19:43:28 +0100
committer	Alexander Gordeev <agordeev@linux.ibm.com>	2022-12-06 16:18:26 +0100
commit		bb3860cc02c660b409a1e02521b84b1d7d4e84cd (patch)
tree		c787504a45ab48437b3aa2316e9fa558fe937d4f /arch/s390
parent		s390/nmi: move storage error checking back to C, enter with DAT on (diff)
s390/nmi: get rid of private slab cache
Get rid of the private "nmi_save_areas" slab cache. The only reason it was introduced years ago was that, with some slab debugging options enabled, allocations would only guarantee a minimum alignment of ARCH_KMALLOC_MINALIGN, which was eight bytes back then. This is not sufficient for the extended machine check save area.

However, since commit 59bb47985c1d ("mm, sl[aou]b: guarantee natural alignment for kmalloc(power-of-two)"), kmalloc guarantees power-of-two alignment even with debugging options enabled. Therefore the private slab cache can be removed.

Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
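The alignment requirement comes from how the save-area address is used: it is stored with the ilog2() of the area size ORed into its low bits (the "mcesa_bits" mentioned in the diff below), so the origin must be aligned to its own power-of-two size for those bits to be free. The following stand-alone sketch illustrates that encoding; the constants and helpers are illustrative stand-ins, not the kernel's MCESA_* definitions:

/*
 * Illustrative sketch only: why the extended save area must be aligned to
 * its own power-of-two size.  Constants and helpers are stand-ins and do
 * not match the kernel's MCESA_* definitions exactly.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define AREA_SIZE	2048UL			/* assumed power-of-two size */
#define ORIGIN_MASK	(~(AREA_SIZE - 1))	/* high bits: origin address */
#define LC_MASK		(AREA_SIZE - 1)		/* low bits: encoded size */

static uint64_t encode_area(void *origin, unsigned long size)
{
	uint64_t addr = (uint64_t)(uintptr_t)origin;

	/* The low bits carry ilog2(size), so origin must be size-aligned. */
	assert((addr & (size - 1)) == 0);
	return addr | (63 - __builtin_clzl(size));
}

int main(void)
{
	/* aligned_alloc() stands in for a naturally aligned kmalloc() */
	void *origin = aligned_alloc(AREA_SIZE, AREA_SIZE);
	uint64_t area;

	if (!origin)
		return 1;
	area = encode_area(origin, AREA_SIZE);
	printf("origin=%p ilog2(size)=%lu\n",
	       (void *)(uintptr_t)(area & ORIGIN_MASK),
	       (unsigned long)(area & LC_MASK));
	free(origin);
	return 0;
}

With ARCH_KMALLOC_MINALIGN of only eight bytes, the assert above could trip; with natural power-of-two alignment it cannot, which is the point of the commit.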
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/kernel/nmi.c	| 40
1 file changed, 9 insertions(+), 31 deletions(-)
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index b54d4aa33851..5dbf274719a9 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -42,21 +42,12 @@ struct mcck_struct {
 };
 
 static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
-static struct kmem_cache *mcesa_cache;
-static unsigned long mcesa_origin_lc;
 
 static inline int nmi_needs_mcesa(void)
 {
 	return MACHINE_HAS_VX || MACHINE_HAS_GS;
 }
 
-static inline unsigned long nmi_get_mcesa_size(void)
-{
-	if (MACHINE_HAS_GS)
-		return MCESA_MAX_SIZE;
-	return MCESA_MIN_SIZE;
-}
-
 /*
  * The initial machine check extended save area for the boot CPU.
  * It will be replaced on the boot CPU reinit with an allocated
@@ -74,36 +65,23 @@ void __init nmi_alloc_mcesa_early(u64 *mcesad)
 	*mcesad |= ilog2(MCESA_MAX_SIZE);
 }
 
-static void __init nmi_alloc_cache(void)
+int nmi_alloc_mcesa(u64 *mcesad)
 {
 	unsigned long size;
-
-	if (!nmi_needs_mcesa())
-		return;
-	size = nmi_get_mcesa_size();
-	if (size > MCESA_MIN_SIZE)
-		mcesa_origin_lc = ilog2(size);
-	/* create slab cache for the machine-check-extended-save-areas */
-	mcesa_cache = kmem_cache_create("nmi_save_areas", size, size, 0, NULL);
-	if (!mcesa_cache)
-		panic("Couldn't create nmi save area cache");
-}
-
-int __ref nmi_alloc_mcesa(u64 *mcesad)
-{
-	unsigned long origin;
+	void *origin;
 
 	*mcesad = 0;
 	if (!nmi_needs_mcesa())
 		return 0;
-	if (!mcesa_cache)
-		nmi_alloc_cache();
-	origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
+	size = MACHINE_HAS_GS ? MCESA_MAX_SIZE : MCESA_MIN_SIZE;
+	origin = kmalloc(size, GFP_KERNEL);
 	if (!origin)
 		return -ENOMEM;
 	/* The pointer is stored with mcesa_bits ORed in */
-	kmemleak_not_leak((void *) origin);
-	*mcesad = __pa(origin) | mcesa_origin_lc;
+	kmemleak_not_leak(origin);
+	*mcesad = __pa(origin);
+	if (MACHINE_HAS_GS)
+		*mcesad |= ilog2(MCESA_MAX_SIZE);
 	return 0;
 }
@@ -111,7 +89,7 @@ void nmi_free_mcesa(u64 *mcesad)
 {
 	if (!nmi_needs_mcesa())
 		return;
-	kmem_cache_free(mcesa_cache, __va(*mcesad & MCESA_ORIGIN_MASK));
+	kfree(__va(*mcesad & MCESA_ORIGIN_MASK));
 }
 
 static __always_inline char *nmi_puts(char *dest, const char *src)
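Put together, the allocation and free paths after this patch read roughly as follows. This is a sketch reassembled from the added and context lines of the hunks above, not a verbatim copy of the resulting file:

int nmi_alloc_mcesa(u64 *mcesad)
{
	unsigned long size;
	void *origin;

	*mcesad = 0;
	if (!nmi_needs_mcesa())
		return 0;
	/* size selection previously done by nmi_get_mcesa_size() */
	size = MACHINE_HAS_GS ? MCESA_MAX_SIZE : MCESA_MIN_SIZE;
	origin = kmalloc(size, GFP_KERNEL);
	if (!origin)
		return -ENOMEM;
	/* The pointer is stored with mcesa_bits ORed in */
	kmemleak_not_leak(origin);
	*mcesad = __pa(origin);
	if (MACHINE_HAS_GS)
		*mcesad |= ilog2(MCESA_MAX_SIZE);
	return 0;
}

void nmi_free_mcesa(u64 *mcesad)
{
	if (!nmi_needs_mcesa())
		return;
	kfree(__va(*mcesad & MCESA_ORIGIN_MASK));
}

Because the power-of-two kmalloc() size is naturally aligned, no dedicated kmem_cache is needed, and nmi_alloc_mcesa() no longer has to be __ref since it no longer calls the __init cache-creation helper.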