Diffstat (limited to 'arch/mips/kernel/traps.c')
-rw-r--r-- | arch/mips/kernel/traps.c | 61
1 file changed, 52 insertions, 9 deletions
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 51706d6dd5b0..22b19c275044 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -90,6 +90,7 @@ extern asmlinkage void handle_mt(void);
 extern asmlinkage void handle_dsp(void);
 extern asmlinkage void handle_mcheck(void);
 extern asmlinkage void handle_reserved(void);
+extern void tlb_do_page_fault_0(void);
 
 void (*board_be_init)(void);
 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
@@ -1088,13 +1089,19 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
 
 static int enable_restore_fp_context(int msa)
 {
-	int err, was_fpu_owner;
+	int err, was_fpu_owner, prior_msa;
 
 	if (!used_math()) {
 		/* First time FP context user. */
+		preempt_disable();
 		err = init_fpu();
-		if (msa && !err)
+		if (msa && !err) {
 			enable_msa();
+			_init_msa_upper();
+			set_thread_flag(TIF_USEDMSA);
+			set_thread_flag(TIF_MSA_CTX_LIVE);
+		}
+		preempt_enable();
 		if (!err)
 			set_used_math();
 		return err;
@@ -1134,10 +1141,11 @@ static int enable_restore_fp_context(int msa)
 	 * This task is using or has previously used MSA. Thus we require
 	 * that Status.FR == 1.
 	 */
+	preempt_disable();
 	was_fpu_owner = is_fpu_owner();
-	err = own_fpu(0);
+	err = own_fpu_inatomic(0);
 	if (err)
-		return err;
+		goto out;
 
 	enable_msa();
 	write_msa_csr(current->thread.fpu.msacsr);
@@ -1146,13 +1154,42 @@ static int enable_restore_fp_context(int msa)
 	/*
 	 * If this is the first time that the task is using MSA and it has
 	 * previously used scalar FP in this time slice then we already nave
-	 * FP context which we shouldn't clobber.
+	 * FP context which we shouldn't clobber. We do however need to clear
+	 * the upper 64b of each vector register so that this task has no
+	 * opportunity to see data left behind by another.
 	 */
-	if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner)
-		return 0;
+	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
+	if (!prior_msa && was_fpu_owner) {
+		_init_msa_upper();
+
+		goto out;
+	}
+
+	if (!prior_msa) {
+		/*
+		 * Restore the least significant 64b of each vector register
+		 * from the existing scalar FP context.
+		 */
+		_restore_fp(current);
+
+		/*
+		 * The task has not formerly used MSA, so clear the upper 64b
+		 * of each vector register such that it cannot see data left
+		 * behind by another task.
+		 */
+		_init_msa_upper();
+	} else {
+		/* We need to restore the vector context. */
+		restore_msa(current);
+
+		/* Restore the scalar FP control & status register */
+		if (!was_fpu_owner)
+			asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31));
+	}
+
+out:
+	preempt_enable();
 
-	/* We need to restore the vector context. */
-	restore_msa(current);
 	return 0;
 }
 
@@ -2114,6 +2151,12 @@ void __init trap_init(void)
 	set_except_vector(15, handle_fpe);
 
 	set_except_vector(16, handle_ftlb);
+
+	if (cpu_has_rixiex) {
+		set_except_vector(19, tlb_do_page_fault_0);
+		set_except_vector(20, tlb_do_page_fault_0);
+	}
+
 	set_except_vector(21, handle_msa);
 	set_except_vector(22, handle_mdmx);
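As a rough illustration of the branch structure this patch introduces in enable_restore_fp_context(), the following is a minimal userspace C sketch, not kernel code: the helpers below are hypothetical stand-ins for _init_msa_upper(), _restore_fp(), restore_msa() and the ctc1 FCSR restore, and only print which path the patched logic would take for each combination of prior_msa and was_fpu_owner.

/*
 * Userspace sketch of the restore-path selection in the patched
 * enable_restore_fp_context(). Helper names are stand-ins; they
 * only report which kernel operation would run.
 */
#include <stdbool.h>
#include <stdio.h>

static void init_msa_upper(void) { puts("  clear upper 64b of each vector register"); }
static void restore_fp(void)     { puts("  restore lower 64b from scalar FP context"); }
static void restore_msa(void)    { puts("  restore full vector context"); }
static void restore_fcsr(void)   { puts("  restore FP control/status register (ctc1)"); }

static void restore_paths(bool prior_msa, bool was_fpu_owner)
{
	printf("prior_msa=%d was_fpu_owner=%d:\n", prior_msa, was_fpu_owner);

	if (!prior_msa && was_fpu_owner) {
		/* Live FP context is already in the registers: keep it,
		 * but sanitise the upper halves so no stale data leaks. */
		init_msa_upper();
		return;
	}

	if (!prior_msa) {
		/* First MSA use, scalar FP context only in memory. */
		restore_fp();
		init_msa_upper();
	} else {
		/* Task already has MSA context: restore the vectors, and
		 * the FCSR too if the scalar FP restore path did not run. */
		restore_msa();
		if (!was_fpu_owner)
			restore_fcsr();
	}
}

int main(void)
{
	restore_paths(false, true);
	restore_paths(false, false);
	restore_paths(true, false);
	restore_paths(true, true);
	return 0;
}

In the kernel itself this whole sequence now runs between preempt_disable() and preempt_enable(), which is the other half of the change: the FPU/MSA ownership checks and register writes must not be interleaved with a context switch.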