author	Vineet Gupta <vgupta@synopsys.com>	2019-05-14 23:35:45 +0200
committer	Vineet Gupta <vgupta@synopsys.com>	2019-07-01 20:02:22 +0200
commit	02c88d142ea6e64b0f81dcf3687a889d8a3556ba (patch)
tree	434933eae5493527099c63fe32c22094c9f9be71 /arch/arc
parent	ARC: mm: do_page_fault refactor #3: tidyup vma access permission code (diff)
ARC: mm: do_page_fault refactor #4: consolidate retry related logic
The stats update code can now elide the "retry" check and one additional level of indentation, since all retry handling is already done ahead of it.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
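In effect, the retry state machine now runs immediately after handle_mm_fault(), before any accounting. A minimal sketch of that consolidated ordering, simplified from the diff below (surrounding declarations, labels, and error paths elided):

	fault = handle_mm_fault(vma, address, flags);

	if (unlikely(fault & VM_FAULT_RETRY)) {
		/* core mm already dropped mmap_sem for the RETRY case */
		if (fatal_signal_pending(current)) {
			if (!user_mode(regs))
				goto no_context;
			return;	/* back to user mode, signal handled first */
		}

		/* flip ALLOW_RETRY to TRIED so the second pass cannot loop */
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	/* reached at most once per fault, so stats need no retry guard */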
Diffstat (limited to 'arch/arc')
-rw-r--r--	arch/arc/mm/fault.c	60
1 file changed, 31 insertions(+), 29 deletions(-)
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 8c7c81ce7f6a..4597b4886edd 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -65,8 +65,8 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
struct mm_struct *mm = tsk->mm;
int si_code = SEGV_MAPERR;
unsigned int write = 0, exec = 0, mask;
- vm_fault_t fault;
- unsigned int flags;
+ vm_fault_t fault; /* handle_mm_fault() output */
+ unsigned int flags; /* handle_mm_fault() input */
/*
* NOTE! We MUST NOT take any locks for this case. We may
@@ -125,49 +125,51 @@ retry:
goto bad_area;
}
- /*
- * If for any reason at all we couldn't handle the fault,
- * make sure we exit gracefully rather than endlessly redo
- * the fault.
- */
fault = handle_mm_fault(vma, address, flags);
- if (fatal_signal_pending(current)) {
+ /*
+ * Fault retry nuances
+ */
+ if (unlikely(fault & VM_FAULT_RETRY)) {
/*
- * if fault retry, mmap_sem already relinquished by core mm
- * so OK to return to user mode (with signal handled first)
+ * If fault needs to be retried, handle any pending signals
+ * first (by returning to user mode).
+ * mmap_sem already relinquished by core mm for RETRY case
*/
- if (fault & VM_FAULT_RETRY) {
+ if (fatal_signal_pending(current)) {
if (!user_mode(regs))
goto no_context;
return;
}
+ /*
+ * retry state machine
+ */
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
+ goto retry;
+ }
}
+ /*
+ * Major/minor page fault accounting
+ * (in case of retry we only land here once)
+ */
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
if (likely(!(fault & VM_FAULT_ERROR))) {
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- /* To avoid updating stats twice for retry case */
- if (fault & VM_FAULT_MAJOR) {
- tsk->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
- regs, address);
- } else {
- tsk->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
- regs, address);
- }
-
- if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
- goto retry;
- }
+ if (fault & VM_FAULT_MAJOR) {
+ tsk->maj_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+ regs, address);
+ } else {
+ tsk->min_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+ regs, address);
}
- /* Fault Handled Gracefully */
+ /* Normal return path: fault Handled Gracefully */
up_read(&mm->mmap_sem);
return;
}
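With the retry handling hoisted above it, the accounting block flattens to a single pass. For readability, the resulting path assembled from the '+' lines of the hunk above:

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}

		/* normal return path: fault handled gracefully */
		up_read(&mm->mmap_sem);
		return;
	}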