Diffstat (limited to 'arch/sh/mm/fault.c')
-rw-r--r-- | arch/sh/mm/fault.c | 87
1 file changed, 87 insertions, 0 deletions
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 716ebf568af2..fa5d7f0b9f18 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -17,6 +17,7 @@
 #include <linux/kprobes.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
 #include <asm/kgdb.h>
 
 extern void die(const char *,struct pt_regs *,long);
@@ -224,3 +225,89 @@ do_sigbus:
 	if (!user_mode(regs))
 		goto no_context;
 }
+
+#ifdef CONFIG_SH_STORE_QUEUES
+/*
+ * This is a special case for the SH-4 store queues, as pages for this
+ * space still need to be faulted in before it's possible to flush the
+ * store queue cache for writeout to the remapped region.
+ */
+#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
+#else
+#define P3_ADDR_MAX		P4SEG
+#endif
+
+/*
+ * Called with interrupts disabled.
+ */
+asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
+					 unsigned long writeaccess,
+					 unsigned long address)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	pte_t entry;
+	struct mm_struct *mm = current->mm;
+	spinlock_t *ptl;
+	int ret = 1;
+
+#ifdef CONFIG_SH_KGDB
+	if (kgdb_nofault && kgdb_bus_err_hook)
+		kgdb_bus_err_hook();
+#endif
+
+	/*
+	 * We don't take page faults for P1, P2, and parts of P4, these
+	 * are always mapped, whether it be due to legacy behaviour in
+	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
+	 */
+	if (address >= P3SEG && address < P3_ADDR_MAX) {
+		pgd = pgd_offset_k(address);
+		mm = NULL;
+	} else {
+		if (unlikely(address >= TASK_SIZE || !mm))
+			return 1;
+
+		pgd = pgd_offset(mm, address);
+	}
+
+	pud = pud_offset(pgd, address);
+	if (pud_none_or_clear_bad(pud))
+		return 1;
+	pmd = pmd_offset(pud, address);
+	if (pmd_none_or_clear_bad(pmd))
+		return 1;
+
+	if (mm)
+		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+	else
+		pte = pte_offset_kernel(pmd, address);
+
+	entry = *pte;
+	if (unlikely(pte_none(entry) || pte_not_present(entry)))
+		goto unlock;
+	if (unlikely(writeaccess && !pte_write(entry)))
+		goto unlock;
+
+	if (writeaccess)
+		entry = pte_mkdirty(entry);
+	entry = pte_mkyoung(entry);
+
+#ifdef CONFIG_CPU_SH4
+	/*
+	 * ITLB is not affected by "ldtlb" instruction.
+	 * So, we need to flush the entry by ourselves.
+	 */
+	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
+#endif
+
+	set_pte(pte, entry);
+	update_mmu_cache(NULL, address, entry);
+	ret = 0;
+unlock:
+	if (mm)
+		pte_unmap_unlock(pte, ptl);
+	return ret;
+}
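The added __do_page_fault() is a software TLB-refill fast path: it walks the page tables by hand (pgd -> pud -> pmd -> pte), bails out to the full fault handler on any missing level or permission mismatch, marks the leaf entry young (and dirty on a write), and reloads it into the TLB. The sketch below is a minimal user-space illustration of that walk pattern, not kernel code: the two-level layout, the flag bits, and every name in it are assumptions made up for the example, and the real handler's locking, ASID handling, and TLB load are left as comments.

	/*
	 * Toy software TLB-miss refill, mirroring the shape of
	 * __do_page_fault() above. All names and constants here are
	 * illustrative assumptions; the kernel's real walk goes through
	 * four levels and takes the PTE lock for user mappings.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT   12
	#define PTRS_PER_PTE 1024
	#define PGD_SHIFT    (PAGE_SHIFT + 10)	/* each pgd entry maps 4 MiB */
	#define PTE_PRESENT  0x1
	#define PTE_WRITE    0x2
	#define PTE_DIRTY    0x4
	#define PTE_YOUNG    0x8

	typedef uint32_t pte_t;
	typedef pte_t   *pgd_t;			/* a pgd entry points at a pte table */

	/* Return 0 on a successful refill, 1 to fall back to the slow path. */
	static int handle_tlb_miss(pgd_t *pgd, uint32_t address, int writeaccess)
	{
		pgd_t pte_table = pgd[address >> PGD_SHIFT];
		pte_t *pte, entry;

		if (!pte_table)			/* like pgd/pud/pmd_none_or_clear_bad() */
			return 1;

		pte = &pte_table[(address >> PAGE_SHIFT) % PTRS_PER_PTE];
		entry = *pte;

		if (!(entry & PTE_PRESENT))	/* like pte_none()/pte_not_present() */
			return 1;
		if (writeaccess && !(entry & PTE_WRITE))	/* like !pte_write() */
			return 1;

		entry |= PTE_YOUNG;		/* pte_mkyoung() */
		if (writeaccess)
			entry |= PTE_DIRTY;	/* pte_mkdirty() */
		*pte = entry;			/* set_pte(); a real handler would */
		return 0;			/* now load the entry into the TLB */
	}

	int main(void)
	{
		static pte_t ptes[PTRS_PER_PTE];
		static pgd_t pgd[1 << (32 - PGD_SHIFT)];
		uint32_t mapped = 0x00003000;

		pgd[mapped >> PGD_SHIFT] = ptes;
		ptes[(mapped >> PAGE_SHIFT) % PTRS_PER_PTE] = PTE_PRESENT | PTE_WRITE;

		/* 0: refilled from the page tables */
		printf("0x%08x -> %d\n", mapped, handle_tlb_miss(pgd, mapped, 1));
		/* 1: nothing mapped, take the full fault path */
		printf("0x%08x -> %d\n", 0x00800000u, handle_tlb_miss(pgd, 0x00800000, 0));
		return 0;
	}

The real handler additionally dispatches on the faulting address first: P3 and store-queue addresses are walked through the kernel page tables via pgd_offset_k() with mm set to NULL (hence no PTE lock), while user addresses go through the task's mm under pte_offset_map_lock(). The toy walk above omits that split and the SH-4 ITLB flush the diff performs before set_pte().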