Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/alternative.c          |   3
-rw-r--r--  arch/s390/kernel/asm-offsets.c          |   7
-rw-r--r--  arch/s390/kernel/cpcmd.c                |  42
-rw-r--r--  arch/s390/kernel/debug.c                |   2
-rw-r--r--  arch/s390/kernel/diag.c                 |  11
-rw-r--r--  arch/s390/kernel/early.c                |   6
-rw-r--r--  arch/s390/kernel/entry.S                |  61
-rw-r--r--  arch/s390/kernel/ipl.c                  |  14
-rw-r--r--  arch/s390/kernel/irq.c                  |   6
-rw-r--r--  arch/s390/kernel/nospec-branch.c        |  17
-rw-r--r--  arch/s390/kernel/nospec-sysfs.c         |   2
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c         |  92
-rw-r--r--  arch/s390/kernel/perf_cpum_cf_common.c  |   1
-rw-r--r--  arch/s390/kernel/processor.c            |   4
-rw-r--r--  arch/s390/kernel/ptrace.c               |   4
-rw-r--r--  arch/s390/kernel/setup.c                | 107
-rw-r--r--  arch/s390/kernel/smp.c                  | 131
-rw-r--r--  arch/s390/kernel/sthyi.c                |  13
-rw-r--r--  arch/s390/kernel/syscall.c              |   5
-rw-r--r--  arch/s390/kernel/sysinfo.c              |  19
-rw-r--r--  arch/s390/kernel/traps.c                |   4
-rw-r--r--  arch/s390/kernel/uv.c                   |   8
22 files changed, 206 insertions(+), 353 deletions(-)
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
index 8e1f2aee85ef..c22ea1c3ef84 100644
--- a/arch/s390/kernel/alternative.c
+++ b/arch/s390/kernel/alternative.c
@@ -76,8 +76,7 @@ static void __init_or_module __apply_alternatives(struct alt_instr *start,
 		instr = (u8 *)&a->instr_offset + a->instr_offset;
 		replacement = (u8 *)&a->repl_offset + a->repl_offset;
 
-		if (!__test_facility(a->facility,
-				     S390_lowcore.alt_stfle_fac_list))
+		if (!__test_facility(a->facility, alt_stfle_fac_list))
 			continue;
 
 		if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 15e637728a4b..f53605a3dfcd 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -15,6 +15,7 @@
 #include <asm/idle.h>
 #include <asm/gmap.h>
 #include <asm/nmi.h>
+#include <asm/setup.h>
 #include <asm/stacktrace.h>
 
 int main(void)
@@ -58,8 +59,6 @@ int main(void)
 	OFFSET(__LC_EXT_PARAMS, lowcore, ext_params);
 	OFFSET(__LC_EXT_CPU_ADDR, lowcore, ext_cpu_addr);
 	OFFSET(__LC_EXT_INT_CODE, lowcore, ext_int_code);
-	OFFSET(__LC_SVC_ILC, lowcore, svc_ilc);
-	OFFSET(__LC_SVC_INT_CODE, lowcore, svc_code);
 	OFFSET(__LC_PGM_ILC, lowcore, pgm_ilc);
 	OFFSET(__LC_PGM_INT_CODE, lowcore, pgm_code);
 	OFFSET(__LC_DATA_EXC_CODE, lowcore, data_exc_code);
@@ -77,8 +76,6 @@ int main(void)
 	OFFSET(__LC_SUBCHANNEL_NR, lowcore, subchannel_nr);
 	OFFSET(__LC_IO_INT_PARM, lowcore, io_int_parm);
 	OFFSET(__LC_IO_INT_WORD, lowcore, io_int_word);
-	OFFSET(__LC_STFL_FAC_LIST, lowcore, stfl_fac_list);
-	OFFSET(__LC_STFLE_FAC_LIST, lowcore, stfle_fac_list);
 	OFFSET(__LC_MCCK_CODE, lowcore, mcck_interruption_code);
 	OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
 	OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
@@ -159,5 +156,7 @@ int main(void)
 	OFFSET(__KEXEC_SHA_REGION_START, kexec_sha_region, start);
 	OFFSET(__KEXEC_SHA_REGION_LEN, kexec_sha_region, len);
 	DEFINE(__KEXEC_SHA_REGION_SIZE, sizeof(struct kexec_sha_region));
+	/* sizeof kernel parameter area */
+	DEFINE(__PARMAREA_SIZE, sizeof(struct parmarea));
 	return 0;
 }
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index 2da027359798..54efc279f54e 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -26,33 +26,35 @@ static char cpcmd_buf[241];
 
 static int diag8_noresponse(int cmdlen)
 {
-	register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
-	register unsigned long reg3 asm ("3") = cmdlen;
-
 	asm volatile(
-		" diag %1,%0,0x8\n"
-		: "+d" (reg3) : "d" (reg2) : "cc");
-	return reg3;
+		" diag %[rx],%[ry],0x8\n"
+		: [ry] "+&d" (cmdlen)
+		: [rx] "d" ((addr_t) cpcmd_buf)
+		: "cc");
+	return cmdlen;
 }
 
 static int diag8_response(int cmdlen, char *response, int *rlen)
 {
-	unsigned long _cmdlen = cmdlen | 0x40000000L;
-	unsigned long _rlen = *rlen;
-	register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
-	register unsigned long reg3 asm ("3") = (addr_t) response;
-	register unsigned long reg4 asm ("4") = _cmdlen;
-	register unsigned long reg5 asm ("5") = _rlen;
+	union register_pair rx, ry;
+	int cc;
 
+	rx.even = (addr_t) cpcmd_buf;
+	rx.odd = (addr_t) response;
+	ry.even = cmdlen | 0x40000000L;
+	ry.odd = *rlen;
 	asm volatile(
-		" diag %2,%0,0x8\n"
-		" brc 8,1f\n"
-		" agr %1,%4\n"
-		"1:\n"
-		: "+d" (reg4), "+d" (reg5)
-		: "d" (reg2), "d" (reg3), "d" (*rlen) : "cc");
-	*rlen = reg5;
-	return reg4;
+		" diag %[rx],%[ry],0x8\n"
+		" ipm %[cc]\n"
+		" srl %[cc],28\n"
+		: [cc] "=&d" (cc), [ry] "+&d" (ry.pair)
+		: [rx] "d" (rx.pair)
+		: "cc");
+	if (cc)
+		*rlen += ry.odd;
+	else
+		*rlen = ry.odd;
+	return ry.even;
 }
 
 /*
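Note: the cpcmd rework above — like the diag, ipl, smp, and sthyi hunks below — replaces GCC's deprecated register-asm variables with the new union register_pair type. A minimal sketch of that type, following the definition this series introduces under arch/s390/include/asm/types.h (reproduced from memory, shown only to make the .even/.odd/.pair accesses in the diff readable):

    /* One 128-bit inline-asm operand that the compiler must place in
     * an even/odd general register pair; .even and .odd name the two
     * 64-bit halves. */
    union register_pair {
            unsigned __int128 pair;
            struct {
                    unsigned long even;
                    unsigned long odd;
            };
    };

Passing ry.pair as a single "+&d" operand lets an instruction such as DIAG, which implicitly uses registers Rx and Rx+1, read and update both halves without hard-coding register numbers in the C source.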
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index bb958d32bd81..09b6c6402f9b 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1418,7 +1418,7 @@ int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
 	else
 		except_str = "-";
 	caller = (unsigned long) entry->caller;
-	rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %04u %pK ",
+	rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %04u %px ",
 		      area, sec, usec, level, except_str,
 		      entry->cpu, (void *)caller);
 	return rc;
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index b8b0cd7b008f..a3f47464c3f1 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -141,16 +141,15 @@ EXPORT_SYMBOL(diag14);
 
 static inline int __diag204(unsigned long *subcode, unsigned long size, void *addr)
 {
-	register unsigned long _subcode asm("0") = *subcode;
-	register unsigned long _size asm("1") = size;
+	union register_pair rp = { .even = *subcode, .odd = size };
 
 	asm volatile(
-		" diag %2,%0,0x204\n"
+		" diag %[addr],%[rp],0x204\n"
 		"0: nopr %%r7\n"
 		EX_TABLE(0b,0b)
-		: "+d" (_subcode), "+d" (_size) : "d" (addr) : "memory");
-	*subcode = _subcode;
-	return _size;
+		: [rp] "+&d" (rp.pair) : [addr] "d" (addr) : "memory");
+	*subcode = rp.even;
+	return rp.odd;
 }
 
 int diag204(unsigned long subcode, unsigned long size, void *addr)
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index a361d2e70025..c2cf79d353cf 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -180,11 +180,9 @@ static noinline __init void setup_lowcore_early(void)
 
 static noinline __init void setup_facility_list(void)
 {
-	memcpy(S390_lowcore.alt_stfle_fac_list,
-	       S390_lowcore.stfle_fac_list,
-	       sizeof(S390_lowcore.alt_stfle_fac_list));
+	memcpy(alt_stfle_fac_list, stfle_fac_list, sizeof(alt_stfle_fac_list));
 	if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
-		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+		__clear_facility(82, alt_stfle_fac_list);
 }
 
 static __init void detect_diag9c(void)
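Note: setup_facility_list() now copies the boot-provided STFLE bits into the plain array alt_stfle_fac_list rather than a lowcore field; the alternatives patching above and the nospec code below test bits in that array. For orientation, a simplified stand-in for the bit test (the real __test_facility lives in arch/s390/include/asm/facility.h; this hypothetical version is only equivalent on big-endian s390, where facility bit 0 is the most significant bit of the first doubleword):

    /* Hypothetical simplified facility test over an STFLE bit array. */
    static inline int example_test_facility(unsigned long nr, const u64 *fac)
    {
            return (fac[nr / 64] >> (63 - (nr % 64))) & 1;
    }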
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index e84f495e7eb2..3e8c6669373a 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -129,6 +129,27 @@ _LPP_OFFSET = __LC_LPP
 	"jnz .+8; .long 0xb2e8d000", 82
 	.endm
 
+#if IS_ENABLED(CONFIG_KVM)
+	/*
+	 * The OUTSIDE macro jumps to the provided label in case the value
+	 * in the provided register is outside of the provided range. The
+	 * macro is useful for checking whether a PSW stored in a register
+	 * pair points inside or outside of a block of instructions.
+	 * @reg: register to check
+	 * @start: start of the range
+	 * @end: end of the range
+	 * @outside_label: jump here if @reg is outside of [@start..@end)
+	 */
+	.macro OUTSIDE reg,start,end,outside_label
+	lgr	%r14,\reg
+	larl	%r13,\start
+	slgr	%r14,%r13
+	lghi	%r13,\end - \start
+	clgr	%r14,%r13
+	jhe	\outside_label
+	.endm
+#endif
+
 	GEN_BR_THUNK %r14
 	GEN_BR_THUNK %r14,%r13
 
@@ -214,7 +235,7 @@ ENTRY(sie64a)
 # are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
 # Other instructions between sie64a and .Lsie_done should not cause program
 # interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
-# See also .Lcleanup_sie_mcck/.Lcleanup_sie_int
+# See also .Lcleanup_sie
 .Lrewind_pad6:
 	nopr	7
 .Lrewind_pad4:
@@ -276,6 +297,7 @@ ENTRY(system_call)
 	xgr	%r10,%r10
 	xgr	%r11,%r11
 	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
+	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
 	lgr	%r3,%r14
 	brasl	%r14,__do_syscall
 	lctlg	%c1,%c1,__LC_USER_ASCE
@@ -318,12 +340,7 @@ ENTRY(pgm_check_handler)
 .Lpgm_skip_asce:
 #if IS_ENABLED(CONFIG_KVM)
 	# cleanup critical section for program checks in sie64a
-	lgr	%r14,%r9
-	larl	%r13,.Lsie_gmap
-	slgr	%r14,%r13
-	lghi	%r13,.Lsie_done - .Lsie_gmap
-	clgr	%r14,%r13
-	jhe	1f
+	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
 	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
 	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
 	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
@@ -392,13 +409,8 @@ ENTRY(\name)
 	tmhh	%r8,0x0001			# interrupting from user ?
 	jnz	1f
 #if IS_ENABLED(CONFIG_KVM)
-	lgr	%r14,%r9
-	larl	%r13,.Lsie_gmap
-	slgr	%r14,%r13
-	lghi	%r13,.Lsie_done - .Lsie_gmap
-	clgr	%r14,%r13
-	jhe	0f
-	brasl	%r14,.Lcleanup_sie_int
+	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,0f
+	brasl	%r14,.Lcleanup_sie
 #endif
 0:	CHECK_STACK __LC_SAVE_AREA_ASYNC
 	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
@@ -541,13 +553,10 @@ ENTRY(mcck_int_handler)
 	tmhh	%r8,0x0001			# interrupting from user ?
 	jnz	.Lmcck_user
 #if IS_ENABLED(CONFIG_KVM)
-	lgr	%r14,%r9
-	larl	%r13,.Lsie_gmap
-	slgr	%r14,%r13
-	lghi	%r13,.Lsie_done - .Lsie_gmap
-	clgr	%r14,%r13
-	jhe	.Lmcck_stack
-	brasl	%r14,.Lcleanup_sie_mcck
+	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,.Lmcck_stack
+	OUTSIDE	%r9,.Lsie_entry,.Lsie_skip,5f
+	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
+5:	brasl	%r14,.Lcleanup_sie
 #endif
 	j	.Lmcck_stack
 .Lmcck_user:
@@ -649,21 +658,13 @@ ENDPROC(stack_overflow)
 #endif
 
 #if IS_ENABLED(CONFIG_KVM)
-.Lcleanup_sie_mcck:
-	larl	%r13,.Lsie_entry
-	slgr	%r9,%r13
-	lghi	%r13,.Lsie_skip - .Lsie_entry
-	clgr	%r9,%r13
-	jhe	.Lcleanup_sie_int
-	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
-.Lcleanup_sie_int:
+.Lcleanup_sie:
 	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
 	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
 	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
 	lctlg	%c1,%c1,__LC_KERNEL_ASCE
 	larl	%r9,sie_exit			# skip forward to sie_exit
 	BR_EX	%r14,%r13
-
 #endif
 
 .section .rodata, "a"
 #define SYSCALL(esame,emu)	.quad __s390x_ ## esame
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 36f870dc944f..50e2c21e0ec9 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -163,16 +163,18 @@ static bool reipl_ccw_clear;
 
 static inline int __diag308(unsigned long subcode, void *addr)
 {
-	register unsigned long _addr asm("0") = (unsigned long) addr;
-	register unsigned long _rc asm("1") = 0;
+	union register_pair r1;
 
+	r1.even = (unsigned long) addr;
+	r1.odd = 0;
 	asm volatile(
-		" diag %0,%2,0x308\n"
+		" diag %[r1],%[subcode],0x308\n"
 		"0: nopr %%r7\n"
 		EX_TABLE(0b,0b)
-		: "+d" (_addr), "+d" (_rc)
-		: "d" (subcode) : "cc", "memory");
-	return _rc;
+		: [r1] "+&d" (r1.pair)
+		: [subcode] "d" (subcode)
+		: "cc", "memory");
+	return r1.odd;
 }
 
 int diag308(unsigned long subcode, void *addr)
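Note: both __diag204 and __diag308 keep their EX_TABLE(0b,0b) annotation, so a program check raised by the diagnose instruction resumes at the fixup label instead of oopsing. A hedged sketch of the same pattern with an invented probe helper (not from this patch; the EX_TABLE macro is the s390 exception-table helper):

    /* Sketch: attempt a load that may fault; on a program check the
     * EX_TABLE entry redirects execution to label 1, leaving rc at
     * -EFAULT (val is then meaningless and must be ignored). */
    static inline int example_probe_read(unsigned long *addr, unsigned long *val)
    {
            int rc = -EFAULT;

            asm volatile(
                    "0:	lg	%[val],0(%[addr])\n"
                    "	lhi	%[rc],0\n"
                    "1:\n"
                    EX_TABLE(0b, 1b)
                    : [rc] "+d" (rc), [val] "=&d" (*val)
                    : [addr] "a" (addr)
                    : "cc", "memory");
            return rc;
    }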
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 714269e10eec..c0df4060d28d 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -146,8 +146,8 @@ void noinstr do_io_irq(struct pt_regs *regs)
 		account_idle_time_irq();
 
 	do {
-		memcpy(&regs->int_code, &S390_lowcore.subchannel_id, 12);
-		if (S390_lowcore.io_int_word & BIT(31))
+		regs->tpi_info = S390_lowcore.tpi_info;
+		if (S390_lowcore.tpi_info.adapter_IO)
 			do_irq_async(regs, THIN_INTERRUPT);
 		else
 			do_irq_async(regs, IO_INTERRUPT);
@@ -172,7 +172,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
 	if (user_mode(regs))
 		update_timer_sys();
 
-	memcpy(&regs->int_code, &S390_lowcore.ext_cpu_addr, 4);
+	regs->int_code = S390_lowcore.ext_int_code_addr;
 	regs->int_parm = S390_lowcore.ext_params;
 	regs->int_parm_long = S390_lowcore.ext_params2;
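Note: do_io_irq() now treats the 12 bytes of I/O interruption data in the lowcore as one typed struct tpi_info, so a struct assignment replaces the raw memcpy() and the adapter-interrupt test gets a named bit-field instead of BIT(31) arithmetic on io_int_word. The shape of the idea (field names beyond adapter_IO are illustrative; the real layout lives in the kernel's tpi_info definition):

    /* Illustrative overlay for the three I/O interruption words. */
    struct tpi_info_like {
            u32 schid;              /* subchannel id word */
            u32 intparm;            /* interruption parameter */
            u32 adapter_IO : 1;     /* thin/adapter interrupt when set */
            u32 : 31;               /* remaining flag/type bits */
    };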
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index 29e511f5bf06..250e4dbf653c 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -17,11 +17,11 @@ static int __init nobp_setup_early(char *str)
 		 * The user explicitely requested nobp=1, enable it and
 		 * disable the expoline support.
 		 */
-		__set_facility(82, S390_lowcore.alt_stfle_fac_list);
+		__set_facility(82, alt_stfle_fac_list);
 		if (IS_ENABLED(CONFIG_EXPOLINE))
 			nospec_disable = 1;
 	} else {
-		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+		__clear_facility(82, alt_stfle_fac_list);
 	}
 	return 0;
 }
@@ -29,7 +29,7 @@ early_param("nobp", nobp_setup_early);
 
 static int __init nospec_setup_early(char *str)
 {
-	__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+	__clear_facility(82, alt_stfle_fac_list);
 	return 0;
 }
 early_param("nospec", nospec_setup_early);
@@ -40,7 +40,7 @@ static int __init nospec_report(void)
 		pr_info("Spectre V2 mitigation: etokens\n");
 	if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
 		pr_info("Spectre V2 mitigation: execute trampolines\n");
-	if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+	if (__test_facility(82, alt_stfle_fac_list))
 		pr_info("Spectre V2 mitigation: limited branch prediction\n");
 	return 0;
 }
@@ -66,14 +66,14 @@ void __init nospec_auto_detect(void)
 		 */
 		if (__is_defined(CC_USING_EXPOLINE))
 			nospec_disable = 1;
-		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+		__clear_facility(82, alt_stfle_fac_list);
 	} else if (__is_defined(CC_USING_EXPOLINE)) {
 		/*
 		 * The kernel has been compiled with expolines.
 		 * Keep expolines enabled and disable nobp.
 		 */
 		nospec_disable = 0;
-		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+		__clear_facility(82, alt_stfle_fac_list);
 	}
 	/*
 	 * If the kernel has not been compiled with expolines the
@@ -86,7 +86,7 @@ static int __init spectre_v2_setup_early(char *str)
 {
 	if (str && !strncmp(str, "on", 2)) {
 		nospec_disable = 0;
-		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+		__clear_facility(82, alt_stfle_fac_list);
 	}
 	if (str && !strncmp(str, "off", 3))
 		nospec_disable = 1;
@@ -99,6 +99,7 @@ early_param("spectre_v2", spectre_v2_setup_early);
 static void __init_or_module __nospec_revert(s32 *start, s32 *end)
 {
 	enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
+	static const u8 branch[] = { 0x47, 0x00, 0x07, 0x00 };
 	u8 *instr, *thunk, *br;
 	u8 insnbuf[6];
 	s32 *epo;
@@ -128,7 +129,7 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
 		if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
 			continue;
 
-		memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4);
+		memcpy(insnbuf + 2, branch, sizeof(branch));
 		switch (type) {
 		case BRCL_EXPOLINE:
 			insnbuf[0] = br[0];
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
index 48f472bf9290..b4b5c8c21166 100644
--- a/arch/s390/kernel/nospec-sysfs.c
+++ b/arch/s390/kernel/nospec-sysfs.c
@@ -17,7 +17,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
 		return sprintf(buf, "Mitigation: etokens\n");
 	if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
 		return sprintf(buf, "Mitigation: execute trampolines\n");
-	if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+	if (__test_facility(82, alt_stfle_fac_list))
 		return sprintf(buf, "Mitigation: limited branch prediction\n");
 	return sprintf(buf, "Vulnerable\n");
 }
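Note: one small codegen point on the __nospec_revert() hunk above — moving the 4-byte branch template from a compound literal to a static const array gives it a single read-only instance and lets sizeof() carry the length. A generic, self-contained illustration:

    #include <string.h>

    typedef unsigned char u8;

    void patch_before(u8 *insnbuf)
    {
            /* compound literal: the template may be rebuilt per call */
            memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4);
    }

    /* 0x47 0x00 ... is the "bc 0" no-op branch template used above */
    static const u8 branch[] = { 0x47, 0x00, 0x07, 0x00 };

    void patch_after(u8 *insnbuf)
    {
            /* one read-only copy; sizeof() tracks the template length */
            memcpy(insnbuf + 2, branch, sizeof(branch));
    }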
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 31a605bcbc6e..1b7a0525fbed 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -362,15 +362,9 @@ static void cpumf_pmu_start(struct perf_event *event, int flags)
 	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
 	struct hw_perf_event *hwc = &event->hw;
 
-	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+	if (!(hwc->state & PERF_HES_STOPPED))
 		return;
 
-	if (WARN_ON_ONCE(hwc->config == -1))
-		return;
-
-	if (flags & PERF_EF_RELOAD)
-		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
-
 	hwc->state = 0;
 
 	/* (Re-)enable and activate the counter set */
@@ -413,15 +407,6 @@ static int cpumf_pmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
 
-	/* Check authorization for the counter set to which this
-	 * counter belongs.
-	 * For group events transaction, the authorization check is
-	 * done in cpumf_pmu_commit_txn().
-	 */
-	if (!(cpuhw->txn_flags & PERF_PMU_TXN_ADD))
-		if (validate_ctr_auth(&event->hw))
-			return -ENOENT;
-
 	ctr_set_enable(&cpuhw->state, event->hw.config_base);
 	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
 
@@ -449,78 +434,6 @@ static void cpumf_pmu_del(struct perf_event *event, int flags)
 	ctr_set_disable(&cpuhw->state, event->hw.config_base);
 }
 
-/*
- * Start group events scheduling transaction.
- * Set flags to perform a single test at commit time.
- *
- * We only support PERF_PMU_TXN_ADD transactions. Save the
- * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
- * transactions.
- */
-static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
-{
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
-
-	WARN_ON_ONCE(cpuhw->txn_flags);		/* txn already in flight */
-
-	cpuhw->txn_flags = txn_flags;
-	if (txn_flags & ~PERF_PMU_TXN_ADD)
-		return;
-
-	perf_pmu_disable(pmu);
-	cpuhw->tx_state = cpuhw->state;
-}
-
-/*
- * Stop and cancel a group events scheduling tranctions.
- * Assumes cpumf_pmu_del() is called for each successful added
- * cpumf_pmu_add() during the transaction.
- */
-static void cpumf_pmu_cancel_txn(struct pmu *pmu)
-{
-	unsigned int txn_flags;
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
-
-	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */
-
-	txn_flags = cpuhw->txn_flags;
-	cpuhw->txn_flags = 0;
-	if (txn_flags & ~PERF_PMU_TXN_ADD)
-		return;
-
-	WARN_ON(cpuhw->tx_state != cpuhw->state);
-
-	perf_pmu_enable(pmu);
-}
-
-/*
- * Commit the group events scheduling transaction. On success, the
- * transaction is closed. On error, the transaction is kept open
- * until cpumf_pmu_cancel_txn() is called.
- */
-static int cpumf_pmu_commit_txn(struct pmu *pmu)
-{
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
-	u64 state;
-
-	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */
-
-	if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
-		cpuhw->txn_flags = 0;
-		return 0;
-	}
-
-	/* check if the updated state can be scheduled */
-	state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
-	state >>= CPUMF_LCCTL_ENABLE_SHIFT;
-	if ((state & cpuhw->info.auth_ctl) != state)
-		return -ENOENT;
-
-	cpuhw->txn_flags = 0;
-	perf_pmu_enable(pmu);
-	return 0;
-}
-
 /* Performance monitoring unit for s390x */
 static struct pmu cpumf_pmu = {
 	.task_ctx_nr  = perf_sw_context,
@@ -533,9 +446,6 @@ static struct pmu cpumf_pmu = {
 	.start	      = cpumf_pmu_start,
 	.stop	      = cpumf_pmu_stop,
 	.read	      = cpumf_pmu_read,
-	.start_txn    = cpumf_pmu_start_txn,
-	.commit_txn   = cpumf_pmu_commit_txn,
-	.cancel_txn   = cpumf_pmu_cancel_txn,
 };
 
 static int __init cpumf_pmu_init(void)
diff --git a/arch/s390/kernel/perf_cpum_cf_common.c b/arch/s390/kernel/perf_cpum_cf_common.c
index 6d53215c8484..2300fbaac556 100644
--- a/arch/s390/kernel/perf_cpum_cf_common.c
+++ b/arch/s390/kernel/perf_cpum_cf_common.c
@@ -30,7 +30,6 @@ DEFINE_PER_CPU(struct cpu_cf_events, cpu_cf_events) = {
 	.alert = ATOMIC64_INIT(0),
 	.state = 0,
 	.flags = 0,
-	.txn_flags = 0,
 };
 /* Indicator whether the CPU-Measurement Counter Facility Support is ready */
 static bool cpum_cf_initalized;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index c92d04f876cb..82df39b17bb5 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -103,11 +103,9 @@ EXPORT_SYMBOL(cpu_have_feature);
 static void show_facilities(struct seq_file *m)
 {
 	unsigned int bit;
-	long *facilities;
 
-	facilities = (long *)&S390_lowcore.stfle_fac_list;
 	seq_puts(m, "facilities :");
-	for_each_set_bit_inv(bit, facilities, MAX_FACILITY_BIT)
+	for_each_set_bit_inv(bit, (long *)&stfle_fac_list, MAX_FACILITY_BIT)
 		seq_printf(m, " %d", bit);
 	seq_putc(m, '\n');
 }
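Note: the counter-facility PMU can drop its transaction callbacks because a group accepted at event-init time can no longer fail at commit — the authorization check already runs when the event is created, so cpumf_pmu_add() always succeeds. When a PMU leaves start_txn/commit_txn/cancel_txn NULL, perf core installs generic stubs; paraphrased from kernel/events/core.c of this era (exact code may differ between versions):

    /* Paraphrase of perf_pmu_register(): the default transaction stubs
     * simply bracket the group add with perf_pmu_disable()/enable(). */
    if (!pmu->start_txn) {
            if (pmu->pmu_enable) {
                    pmu->start_txn  = perf_pmu_start_txn;
                    pmu->commit_txn = perf_pmu_commit_txn;
                    pmu->cancel_txn = perf_pmu_cancel_txn;
            } else {
                    pmu->start_txn  = perf_pmu_nop_txn;
                    pmu->commit_txn = perf_pmu_nop_int;
                    pmu->cancel_txn = perf_pmu_nop_void;
            }
    }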
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 18b3416fd663..0ea3d02b378d 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -975,10 +975,12 @@ static int s390_tdb_get(struct task_struct *target,
 			struct membuf to)
 {
 	struct pt_regs *regs = task_pt_regs(target);
+	size_t size;
 
 	if (!(regs->int_code & 0x200))
 		return -ENODATA;
-	return membuf_write(&to, target->thread.trap_tdb, 256);
+	size = sizeof(target->thread.trap_tdb.data);
+	return membuf_write(&to, target->thread.trap_tdb.data, size);
 }
 
 static int s390_tdb_set(struct task_struct *target,
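Note: s390_tdb_get() now sizes the copy from the typed trap_tdb field rather than a bare 256. Regset ->get() handlers use the membuf cursor API: membuf_write() copies, clips at the end of the destination, and advances the cursor, so handlers simply return its result or a negative error (as with -ENODATA above). A hypothetical minimal handler for illustration (names invented):

    /* Hypothetical regset ->get(): copy one fixed-size thread field. */
    static int example_regs_get(struct task_struct *target,
                                const struct user_regset *regset,
                                struct membuf to)
    {
            struct pt_regs *regs = task_pt_regs(target);

            return membuf_write(&to, &regs->gprs, sizeof(regs->gprs));
    }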
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 5aab59ad5688..b58ee83f30e3 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -96,7 +96,6 @@ unsigned long int_hwcap = 0;
 
 int __bootdata(noexec_disabled);
 unsigned long __bootdata(ident_map_size);
-unsigned long __bootdata(vmalloc_size);
 struct mem_detect_info __bootdata(mem_detect);
 
 struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
@@ -108,6 +107,9 @@ unsigned long __bootdata_preserved(__edma);
 unsigned long __bootdata_preserved(__kaslr_offset);
 unsigned int __bootdata_preserved(zlib_dfltcc_support);
 EXPORT_SYMBOL(zlib_dfltcc_support);
+u64 __bootdata_preserved(stfle_fac_list[16]);
+EXPORT_SYMBOL(stfle_fac_list);
+u64 __bootdata_preserved(alt_stfle_fac_list[16]);
 
 unsigned long VMALLOC_START;
 EXPORT_SYMBOL(VMALLOC_START);
@@ -165,7 +167,7 @@ static void __init set_preferred_console(void)
 	else if (CONSOLE_IS_3270)
 		add_preferred_console("tty3270", 0, NULL);
 	else if (CONSOLE_IS_VT220)
-		add_preferred_console("ttyS", 1, NULL);
+		add_preferred_console("ttysclp", 0, NULL);
 	else if (CONSOLE_IS_HVC)
 		add_preferred_console("hvc", 0, NULL);
 }
@@ -338,27 +340,6 @@ int __init arch_early_irq_init(void)
 	return 0;
 }
 
-static int __init stack_realloc(void)
-{
-	unsigned long old, new;
-
-	old = S390_lowcore.async_stack - STACK_INIT_OFFSET;
-	new = stack_alloc();
-	if (!new)
-		panic("Couldn't allocate async stack");
-	WRITE_ONCE(S390_lowcore.async_stack, new + STACK_INIT_OFFSET);
-	free_pages(old, THREAD_SIZE_ORDER);
-
-	old = S390_lowcore.mcck_stack - STACK_INIT_OFFSET;
-	new = stack_alloc();
-	if (!new)
-		panic("Couldn't allocate machine check stack");
-	WRITE_ONCE(S390_lowcore.mcck_stack, new + STACK_INIT_OFFSET);
-	memblock_free_late(old, THREAD_SIZE);
-	return 0;
-}
-early_initcall(stack_realloc);
-
 void __init arch_call_rest_init(void)
 {
 	unsigned long stack;
@@ -413,11 +394,6 @@ static void __init setup_lowcore_dat_off(void)
 	lc->lpp = LPP_MAGIC;
 	lc->machine_flags = S390_lowcore.machine_flags;
 	lc->preempt_count = S390_lowcore.preempt_count;
-	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
-	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
-	       sizeof(lc->stfle_fac_list));
-	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
-	       sizeof(lc->alt_stfle_fac_list));
 	nmi_alloc_boot_cpu(lc);
 	lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
 	lc->exit_timer = S390_lowcore.exit_timer;
@@ -568,53 +544,10 @@ static void __init setup_resources(void)
 #endif
 }
 
-static void __init setup_ident_map_size(void)
+static void __init setup_memory_end(void)
 {
-	unsigned long vmax, tmp;
-
-	/* Choose kernel address space layout: 3 or 4 levels. */
-	tmp = ident_map_size / PAGE_SIZE;
-	tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
-	if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
-		vmax = _REGION2_SIZE; /* 3-level kernel page table */
-	else
-		vmax = _REGION1_SIZE; /* 4-level kernel page table */
-	/* module area is at the end of the kernel address space. */
-	MODULES_END = vmax;
-	if (is_prot_virt_host())
-		adjust_to_uv_max(&MODULES_END);
-#ifdef CONFIG_KASAN
-	vmax = _REGION1_SIZE;
-	MODULES_END = kasan_vmax;
-#endif
-	MODULES_VADDR = MODULES_END - MODULES_LEN;
-	VMALLOC_END = MODULES_VADDR;
-	VMALLOC_START = VMALLOC_END - vmalloc_size;
-
-	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
-	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
-	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
-	tmp = SECTION_ALIGN_UP(tmp);
-	tmp = VMALLOC_START - tmp * sizeof(struct page);
-	tmp &= ~((vmax >> 11) - 1);	/* align to page table level */
-	tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
-	vmemmap = (struct page *) tmp;
-
-	/* Take care that ident_map_size <= vmemmap */
-	ident_map_size = min(ident_map_size, (unsigned long)vmemmap);
-#ifdef CONFIG_KASAN
-	ident_map_size = min(ident_map_size, KASAN_SHADOW_START);
-#endif
-	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
-#ifdef CONFIG_KASAN
-	/* move vmemmap above kasan shadow only if stands in a way */
-	if (KASAN_SHADOW_END > (unsigned long)vmemmap &&
-	    (unsigned long)vmemmap + vmemmap_size > KASAN_SHADOW_START)
-		vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
-#endif
-	max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
 	memblock_remove(ident_map_size, ULONG_MAX);
-
+	max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
 	pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
 }
 
@@ -653,30 +586,6 @@ static void __init reserve_above_ident_map(void)
 }
 
 /*
- * Make sure that oldmem, where the dump is stored, is protected
- */
-static void __init reserve_oldmem(void)
-{
-#ifdef CONFIG_CRASH_DUMP
-	if (OLDMEM_BASE)
-		/* Forget all memory above the running kdump system */
-		memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
-#endif
-}
-
-/*
- * Make sure that oldmem, where the dump is stored, is protected
- */
-static void __init remove_oldmem(void)
-{
-#ifdef CONFIG_CRASH_DUMP
-	if (OLDMEM_BASE)
-		/* Forget all memory above the running kdump system */
-		memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
-#endif
-}
-
-/*
  * Reserve memory for kdump kernel to be loaded with kexec
  */
 static void __init reserve_crashkernel(void)
@@ -1141,7 +1050,6 @@ void __init setup_arch(char **cmdline_p)
 
 	/* Do some memory reservations *before* memory is added to memblock */
 	reserve_above_ident_map();
-	reserve_oldmem();
 	reserve_kernel();
 	reserve_initrd();
 	reserve_certificate_list();
@@ -1152,10 +1060,9 @@ void __init setup_arch(char **cmdline_p)
 	memblock_add_mem_detect_info();
 	free_mem_detect_info();
 
-	remove_oldmem();
 	setup_uv();
-	setup_ident_map_size();
+	setup_memory_end();
 	setup_memory();
 	dma_contiguous_reserve(ident_map_size);
 	vmcp_cma_reserve();
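Note: stfle_fac_list and alt_stfle_fac_list become ordinary arrays handed over from the decompressor through __bootdata_preserved. The mechanism, paraphrased from arch/s390/include/asm/setup.h (the section-name detail is quoted from memory and may differ):

    /* Variables tagged this way are collected in dedicated boot-data
     * sections shared between the decompressor and the kernel proper,
     * so values established during early boot stay visible later. */
    #define __bootdata(var)           __section(".boot.data." #var) var
    #define __bootdata_preserved(var) __section(".boot.preserved.data." #var) var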
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 111909aeb8d2..ff42d3aa0f00 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -74,7 +74,6 @@ enum {
 static DEFINE_PER_CPU(struct cpu *, cpu_device);
 
 struct pcpu {
-	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
 	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
 	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
 	signed char state;		/* physical cpu state */
@@ -194,20 +193,12 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 	unsigned long async_stack, nodat_stack, mcck_stack;
 	struct lowcore *lc;
 
-	if (pcpu != &pcpu_devices[0]) {
-		pcpu->lowcore = (struct lowcore *)
-			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
-		nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
-		if (!pcpu->lowcore || !nodat_stack)
-			goto out;
-	} else {
-		nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
-	}
+	lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
+	nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
 	async_stack = stack_alloc();
 	mcck_stack = stack_alloc();
-	if (!async_stack || !mcck_stack)
-		goto out_stack;
-	lc = pcpu->lowcore;
+	if (!lc || !nodat_stack || !async_stack || !mcck_stack)
+		goto out;
 	memcpy(lc, &S390_lowcore, 512);
 	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
 	lc->async_stack = async_stack + STACK_INIT_OFFSET;
@@ -220,45 +211,42 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
 	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
 	if (nmi_alloc_per_cpu(lc))
-		goto out_stack;
+		goto out;
 	lowcore_ptr[cpu] = lc;
 	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
 	return 0;
 
-out_stack:
+out:
 	stack_free(mcck_stack);
 	stack_free(async_stack);
-out:
-	if (pcpu != &pcpu_devices[0]) {
-		free_pages(nodat_stack, THREAD_SIZE_ORDER);
-		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
-	}
+	free_pages(nodat_stack, THREAD_SIZE_ORDER);
+	free_pages((unsigned long) lc, LC_ORDER);
 	return -ENOMEM;
 }
 
 static void pcpu_free_lowcore(struct pcpu *pcpu)
 {
-	unsigned long async_stack, nodat_stack, mcck_stack, lowcore;
-
-	nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
-	async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
-	mcck_stack = pcpu->lowcore->mcck_stack - STACK_INIT_OFFSET;
-	lowcore = (unsigned long) pcpu->lowcore;
+	unsigned long async_stack, nodat_stack, mcck_stack;
+	struct lowcore *lc;
+	int cpu;
 
+	cpu = pcpu - pcpu_devices;
+	lc = lowcore_ptr[cpu];
+	nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
+	async_stack = lc->async_stack - STACK_INIT_OFFSET;
+	mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
 	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
-	lowcore_ptr[pcpu - pcpu_devices] = NULL;
-	nmi_free_per_cpu(pcpu->lowcore);
+	lowcore_ptr[cpu] = NULL;
+	nmi_free_per_cpu(lc);
 	stack_free(async_stack);
 	stack_free(mcck_stack);
-	if (pcpu == &pcpu_devices[0])
-		return;
 	free_pages(nodat_stack, THREAD_SIZE_ORDER);
-	free_pages(lowcore, LC_ORDER);
+	free_pages((unsigned long) lc, LC_ORDER);
 }
 
 static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 {
-	struct lowcore *lc = pcpu->lowcore;
+	struct lowcore *lc = lowcore_ptr[cpu];
 
 	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
 	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
@@ -275,17 +263,16 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 	lc->cregs_save_area[1] = lc->kernel_asce;
 	lc->cregs_save_area[7] = lc->user_asce;
 	save_access_regs((unsigned int *) lc->access_regs_save_area);
-	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
-	       sizeof(lc->stfle_fac_list));
-	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
-	       sizeof(lc->alt_stfle_fac_list));
 	arch_spin_lock_setup(cpu);
 }
 
 static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
 {
-	struct lowcore *lc = pcpu->lowcore;
+	struct lowcore *lc;
+	int cpu;
 
+	cpu = pcpu - pcpu_devices;
+	lc = lowcore_ptr[cpu];
 	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
 		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
 	lc->current_task = (unsigned long) tsk;
@@ -301,8 +288,11 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
 
 static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
 {
-	struct lowcore *lc = pcpu->lowcore;
+	struct lowcore *lc;
+	int cpu;
 
+	cpu = pcpu - pcpu_devices;
+	lc = lowcore_ptr[cpu];
 	lc->restart_stack = lc->nodat_stack;
 	lc->restart_fn = (unsigned long) func;
 	lc->restart_data = (unsigned long) data;
@@ -387,7 +377,7 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
  */
 void smp_call_ipl_cpu(void (*func)(void *), void *data)
 {
-	struct lowcore *lc = pcpu_devices->lowcore;
+	struct lowcore *lc = lowcore_ptr[0];
 
 	if (pcpu_devices[0].address == stap())
 		lc = &S390_lowcore;
@@ -600,18 +590,21 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
 
 int smp_store_status(int cpu)
 {
-	struct pcpu *pcpu = pcpu_devices + cpu;
+	struct lowcore *lc;
+	struct pcpu *pcpu;
 	unsigned long pa;
 
-	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
+	pcpu = pcpu_devices + cpu;
+	lc = lowcore_ptr[cpu];
+	pa = __pa(&lc->floating_pt_save_area);
 	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
 			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
 		return -EIO;
 	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
 		return 0;
-	pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
+	pa = __pa(lc->mcesad & MCESA_ORIGIN_MASK);
 	if (MACHINE_HAS_GS)
-		pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
+		pa |= lc->mcesad & MCESA_LC_MASK;
 	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
 			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
 		return -EIO;
@@ -1011,7 +1004,6 @@ void __init smp_prepare_boot_cpu(void)
 
 	WARN_ON(!cpu_present(0) || !cpu_online(0));
 	pcpu->state = CPU_STATE_CONFIGURED;
-	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
 }
@@ -1237,3 +1229,54 @@ out:
 	return rc;
 }
 subsys_initcall(s390_smp_init);
+
+static __always_inline void set_new_lowcore(struct lowcore *lc)
+{
+	union register_pair dst, src;
+	u32 pfx;
+
+	src.even = (unsigned long) &S390_lowcore;
+	src.odd = sizeof(S390_lowcore);
+	dst.even = (unsigned long) lc;
+	dst.odd = sizeof(*lc);
+	pfx = (unsigned long) lc;
+
+	asm volatile(
+		"	mvcl	%[dst],%[src]\n"
+		"	spx	%[pfx]\n"
+		: [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
+		: [pfx] "Q" (pfx)
+		: "memory", "cc");
+}
+
+static int __init smp_reinit_ipl_cpu(void)
+{
+	unsigned long async_stack, nodat_stack, mcck_stack;
+	struct lowcore *lc, *lc_ipl;
+	unsigned long flags;
+
+	lc_ipl = lowcore_ptr[0];
+	lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
+	nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+	async_stack = stack_alloc();
+	mcck_stack = stack_alloc();
+	if (!lc || !nodat_stack || !async_stack || !mcck_stack)
+		panic("Couldn't allocate memory");
+
+	local_irq_save(flags);
+	local_mcck_disable();
+	set_new_lowcore(lc);
+	S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
+	S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
+	S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
+	lowcore_ptr[0] = lc;
+	local_mcck_enable();
+	local_irq_restore(flags);
+
+	free_pages(lc_ipl->async_stack - STACK_INIT_OFFSET, THREAD_SIZE_ORDER);
+	memblock_free_late(lc_ipl->mcck_stack - STACK_INIT_OFFSET, THREAD_SIZE);
+	memblock_free_late((unsigned long) lc_ipl, sizeof(*lc_ipl));
+
+	return 0;
+}
+early_initcall(smp_reinit_ipl_cpu);
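Note: with the cached pcpu->lowcore pointer removed, every user derives the CPU index from its position in pcpu_devices[] and indexes lowcore_ptr[]; that single source of truth is what lets smp_reinit_ipl_cpu() above swap the IPL CPU's lowcore by simply repointing lowcore_ptr[0]. The repeated open-coded lookup is equivalent to a helper like this (hypothetical, not part of the patch):

    /* Hypothetical helper matching the open-coded lookups above: the
     * only remaining source of truth for a CPU's lowcore is the
     * lowcore_ptr[] array, indexed by the pcpu's array position. */
    static inline struct lowcore *pcpu_lowcore(struct pcpu *pcpu)
    {
            return lowcore_ptr[pcpu - pcpu_devices];
    }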
diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c
index 888cc2f166db..4d141e2c132e 100644
--- a/arch/s390/kernel/sthyi.c
+++ b/arch/s390/kernel/sthyi.c
@@ -395,19 +395,18 @@ out:
 
 static int sthyi(u64 vaddr, u64 *rc)
 {
-	register u64 code asm("0") = 0;
-	register u64 addr asm("2") = vaddr;
-	register u64 rcode asm("3");
+	union register_pair r1 = { .even = 0, }; /* subcode */
+	union register_pair r2 = { .even = vaddr, };
 	int cc;
 
 	asm volatile(
-		".insn rre,0xB2560000,%[code],%[addr]\n"
+		".insn rre,0xB2560000,%[r1],%[r2]\n"
 		"ipm %[cc]\n"
 		"srl %[cc],28\n"
-		: [cc] "=d" (cc), "=d" (rcode)
-		: [code] "d" (code), [addr] "a" (addr)
+		: [cc] "=&d" (cc), [r2] "+&d" (r2.pair)
+		: [r1] "d" (r1.pair)
 		: "memory", "cc");
-	*rc = rcode;
+	*rc = r2.odd;
 	return cc;
 }
diff --git a/arch/s390/kernel/syscall.c b/arch/s390/kernel/syscall.c
index 4e5cc7d2364e..76f7916cc30f 100644
--- a/arch/s390/kernel/syscall.c
+++ b/arch/s390/kernel/syscall.c
@@ -144,11 +144,8 @@ void noinstr __do_syscall(struct pt_regs *regs, int per_trap)
 {
 	add_random_kstack_offset();
 	enter_from_user_mode(regs);
-
-	memcpy(&regs->gprs[8], S390_lowcore.save_area_sync, 8 * sizeof(unsigned long));
-	memcpy(&regs->int_code, &S390_lowcore.svc_ilc, sizeof(regs->int_code));
 	regs->psw = S390_lowcore.svc_old_psw;
-
+	regs->int_code = S390_lowcore.svc_int_code;
 	update_timer_sys();
 
 	local_irq_enable();
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 2ac3c9b56a13..ef3f2659876c 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -25,19 +25,22 @@ int topology_max_mnest;
 
 static inline int __stsi(void *sysinfo, int fc, int sel1, int sel2, int *lvl)
 {
-	register int r0 asm("0") = (fc << 28) | sel1;
-	register int r1 asm("1") = sel2;
+	int r0 = (fc << 28) | sel1;
 	int rc = 0;
 
 	asm volatile(
-		" stsi 0(%3)\n"
+		" lr 0,%[r0]\n"
+		" lr 1,%[r1]\n"
+		" stsi 0(%[sysinfo])\n"
 		"0: jz 2f\n"
-		"1: lhi %1,%4\n"
-		"2:\n"
+		"1: lhi %[rc],%[retval]\n"
+		"2: lr %[r0],0\n"
 		EX_TABLE(0b, 1b)
-		: "+d" (r0), "+d" (rc)
-		: "d" (r1), "a" (sysinfo), "K" (-EOPNOTSUPP)
-		: "cc", "memory");
+		: [r0] "+d" (r0), [rc] "+d" (rc)
+		: [r1] "d" (sel2),
+		  [sysinfo] "a" (sysinfo),
+		  [retval] "K" (-EOPNOTSUPP)
+		: "cc", "0", "1", "memory");
 	*lvl = ((unsigned int) r0) >> 28;
 	return rc;
 }
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 8dd23c703718..019c5748b607 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -36,7 +36,7 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
 	unsigned long address;
 
 	if (regs->int_code & 0x200)
-		address = *(unsigned long *)(current->thread.trap_tdb + 24);
+		address = current->thread.trap_tdb.data[3];
 	else
 		address = regs->psw.addr;
 	return (void __user *) (address - (regs->int_code >> 16));
@@ -318,7 +318,7 @@ void noinstr __do_pgm_check(struct pt_regs *regs)
 
 	if (S390_lowcore.pgm_code & 0x0200) {
 		/* transaction abort */
-		memcpy(&current->thread.trap_tdb, &S390_lowcore.pgm_tdb, 256);
+		current->thread.trap_tdb = S390_lowcore.pgm_tdb;
 	}
 
 	if (S390_lowcore.pgm_code & PGM_INT_CODE_PER) {
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 370f664580af..6be2167943bb 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -52,7 +52,7 @@ void __init setup_uv(void)
 	unsigned long uv_stor_base;
 
 	/*
-	 * keep these conditions in line with kasan init code has_uv_sec_stor_limit()
+	 * keep these conditions in line with has_uv_sec_stor_limit()
 	 */
 	if (!is_prot_virt_host())
 		return;
@@ -91,12 +91,6 @@ fail:
 	prot_virt_host = 0;
 }
 
-void adjust_to_uv_max(unsigned long *vmax)
-{
-	if (uv_info.max_sec_stor_addr)
-		*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
-}
-
 /*
  * Requests the Ultravisor to pin the page in the shared state. This will
  * cause an intercept when the guest attempts to unshare the pinned page.
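Note: the traps.c and ptrace.c hunks depend on thread.trap_tdb being a structure instead of a raw 256-byte buffer: struct assignment from S390_lowcore.pgm_tdb replaces memcpy(..., 256), trap_tdb.data[3] names the doubleword at byte offset 24 that get_trap_ip() previously reached by pointer arithmetic, and sizeof(...data) replaces the literal 256 in s390_tdb_get(). The shape of such a type (illustrative name; the kernel's actual definition may differ):

    /* Illustrative: a 256-byte transaction diagnostic block as 32
     * doublewords; data[3] == *(u64 *)((char *)&tdb + 24). */
    struct tdb_example {
            unsigned long long data[32];
    };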