author      Linus Torvalds <torvalds@linux-foundation.org>  2022-01-10 17:58:16 +0100
committer   Linus Torvalds <torvalds@linux-foundation.org>  2022-01-10 17:58:16 +0100
commit      f0d43b3a3809f280806825df2454fd83f71874c4 (patch)
tree        6952e39c3657fb51e93960b78bb4fa0958b6e3e7 /arch/s390/kernel/smp.c
parent      Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/a... (diff)
parent      s390/dasd: use default_groups in kobj_type (diff)
Merge tag 's390-5.17-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Heiko Carstens:
"Besides all the small improvements and cleanups the most notable part
is the fast vector/SIMD implementation of the ChaCha20 stream cipher,
which is an adaptation of Andy Polyakov's code for the kernel.
Summary:
- add fast vector/SIMD implementation of the ChaCha20 stream cipher,
which mainly adapts Andy Polyakov's code for the kernel
- add status attribute to AP queue device so users can easily figure
out its status
- fix race in page table release code, and add lots of documentation
- remove uevent suppress from cio device driver, since it turned out
that it generated more problems than it solved
- quite a lot of virtual vs physical address confusion fixes
- various other small improvements and cleanups all over the place"
* tag 's390-5.17-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (39 commits)
s390/dasd: use default_groups in kobj_type
s390/sclp_sd: use default_groups in kobj_type
s390/pci: simplify __pciwb_mio() inline asm
s390: remove unused TASK_SIZE_OF
s390/crash_dump: fix virtual vs physical address handling
s390/crypto: fix compile error for ChaCha20 module
s390/mm: check 2KB-fragment page on release
s390/mm: better annotate 2KB pagetable fragments handling
s390/mm: fix 2KB pgtable release race
s390/sclp: release SCLP early buffer after kernel initialization
s390/nmi: disable interrupts on extended save area update
s390/zcrypt: CCA control CPRB sending
s390/disassembler: update opcode table
s390/uv: fix memblock virtual vs physical address confusion
s390/smp: fix memblock_phys_free() vs memblock_free() confusion
s390/sclp: fix memblock_phys_free() vs memblock_free() confusion
s390/exit: remove dead reference to do_exit from copy_thread
s390/ap: add missing virt_to_phys address conversion
s390/pgalloc: use pointers instead of unsigned long values
s390/pgalloc: add virt/phys address handling to base asce functions
...
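Note on the memblock_phys_free() vs memblock_free() commits above: since the memblock rename in 5.16, memblock_alloc() returns a virtual pointer that must be released with memblock_free(), while memblock_phys_alloc() returns a physical address that must be released with memblock_phys_free(). The fixes in this series restore that pairing. A minimal sketch of the rule, not taken from the patches (the function name is made up for illustration):

	#include <linux/memblock.h>

	static void __init memblock_pairing_sketch(void)
	{
		void *virt;
		phys_addr_t phys;

		/* Virtual interface: allocate and free by pointer. */
		virt = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (virt)
			memblock_free(virt, PAGE_SIZE);

		/* Physical interface: allocate and free by address. */
		phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
		if (phys)
			memblock_phys_free(phys, PAGE_SIZE);
	}
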
Diffstat (limited to 'arch/s390/kernel/smp.c')
-rw-r--r--  arch/s390/kernel/smp.c  35
1 file changed, 18 insertions, 17 deletions
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 78a8ea6fd582..2bad902d8437 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -212,7 +212,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
 	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
 	lc->preempt_count = PREEMPT_DISABLED;
-	if (nmi_alloc_per_cpu(lc))
+	if (nmi_alloc_mcesa(&lc->mcesad))
 		goto out;
 	lowcore_ptr[cpu] = lc;
 	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
@@ -239,7 +239,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
 	mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
 	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
 	lowcore_ptr[cpu] = NULL;
-	nmi_free_per_cpu(lc);
+	nmi_free_mcesa(&lc->mcesad);
 	stack_free(async_stack);
 	stack_free(mcck_stack);
 	free_pages(nodat_stack, THREAD_SIZE_ORDER);
@@ -622,7 +622,7 @@ int smp_store_status(int cpu)
 		return -EIO;
 	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
 		return 0;
-	pa = __pa(lc->mcesad & MCESA_ORIGIN_MASK);
+	pa = lc->mcesad & MCESA_ORIGIN_MASK;
 	if (MACHINE_HAS_GS)
 		pa |= lc->mcesad & MCESA_LC_MASK;
 	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
@@ -658,26 +658,22 @@ int smp_store_status(int cpu)
  * deactivates the elfcorehdr= kernel parameter
  */
 static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
-				     bool is_boot_cpu, unsigned long page)
+				     bool is_boot_cpu, __vector128 *vxrs)
 {
-	__vector128 *vxrs = (__vector128 *) page;
-
 	if (is_boot_cpu)
 		vxrs = boot_cpu_vector_save_area;
 	else
-		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
+		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, __pa(vxrs));
 	save_area_add_vxrs(sa, vxrs);
 }
 
 static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
-				     bool is_boot_cpu, unsigned long page)
+				     bool is_boot_cpu, void *regs)
 {
-	void *regs = (void *) page;
-
 	if (is_boot_cpu)
 		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
 	else
-		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
+		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(regs));
 	save_area_add_regs(sa, regs);
 }
 
@@ -685,14 +681,14 @@ void __init smp_save_dump_cpus(void)
 {
 	int addr, boot_cpu_addr, max_cpu_addr;
 	struct save_area *sa;
-	unsigned long page;
 	bool is_boot_cpu;
+	void *page;
 
 	if (!(oldmem_data.start || is_ipl_type_dump()))
 		/* No previous system present, normal boot. */
 		return;
 	/* Allocate a page as dumping area for the store status sigps */
-	page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
+	page = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
 	if (!page)
 		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
 		      PAGE_SIZE, 1UL << 31);
@@ -723,7 +719,7 @@ void __init smp_save_dump_cpus(void)
 		/* Get the CPU registers */
 		smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
 	}
-	memblock_phys_free(page, PAGE_SIZE);
+	memblock_free(page, PAGE_SIZE);
 	diag_amode31_ops.diag308_reset();
 	pcpu_set_smt(0);
 }
@@ -880,7 +876,7 @@ void __init smp_detect_cpus(void)
 
 	/* Add CPUs present at boot */
 	__smp_rescan_cpus(info, true);
-	memblock_phys_free((unsigned long)info, sizeof(*info));
+	memblock_free(info, sizeof(*info));
 }
 
 /*
@@ -1271,14 +1267,15 @@ static int __init smp_reinit_ipl_cpu(void)
 {
 	unsigned long async_stack, nodat_stack, mcck_stack;
 	struct lowcore *lc, *lc_ipl;
-	unsigned long flags;
+	unsigned long flags, cr0;
+	u64 mcesad;
 
 	lc_ipl = lowcore_ptr[0];
 	lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
 	nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
 	async_stack = stack_alloc();
 	mcck_stack = stack_alloc();
-	if (!lc || !nodat_stack || !async_stack || !mcck_stack)
+	if (!lc || !nodat_stack || !async_stack || !mcck_stack || nmi_alloc_mcesa(&mcesad))
 		panic("Couldn't allocate memory");
 
 	local_irq_save(flags);
@@ -1287,6 +1284,10 @@ static int __init smp_reinit_ipl_cpu(void)
 	S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
 	S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
 	S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
+	__ctl_store(cr0, 0, 0);
+	__ctl_clear_bit(0, 28); /* disable lowcore protection */
+	S390_lowcore.mcesad = mcesad;
+	__ctl_load(cr0, 0, 0);
 	lowcore_ptr[0] = lc;
 	local_mcck_enable();
 	local_irq_restore(flags);
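The hunks above also show the reworked NMI save area API: nmi_alloc_per_cpu(lc) and nmi_free_per_cpu(lc) are replaced by nmi_alloc_mcesa() and nmi_free_mcesa(), which take a pointer to the lowcore's machine check extended save area descriptor (mcesad) instead of the whole lowcore. A rough usage sketch derived only from the calls visible in this diff (the wrapper functions below are made up, and the helpers' internals are not shown here):

	/* Allocate the machine check extended save area for a CPU's lowcore;
	 * on success the save area origin (and length code) ends up in lc->mcesad. */
	static int example_alloc_mcesa(struct lowcore *lc)
	{
		return nmi_alloc_mcesa(&lc->mcesad);
	}

	/* Release the save area again when the lowcore is torn down. */
	static void example_free_mcesa(struct lowcore *lc)
	{
		nmi_free_mcesa(&lc->mcesad);
	}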