author	Hugh Dickins <hughd@google.com>	2019-04-19 02:50:09 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-04-19 18:46:04 +0200
commit	64165b1affc5bc16231ac971e66aae7d68d57f2c (patch)
tree	f076121056eafaf3790cbc23cc0be88b4bc85d79 /mm/swapfile.c
parent	mm: swapoff: remove too limiting SWAP_UNUSE_MAX_TRIES (diff)
mm: swapoff: take notice of completion sooner
The old try_to_unuse() implementation was driven by find_next_to_unuse(),
which terminated as soon as all the swap had been freed.  Add inuse_pages
checks now (alongside signal_pending()) to stop scanning mms and swap_map
once finished.

The same ought to be done in shmem_unuse() too, but never was before,
and needs a different interface: so leave it as is for now.

Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1904081258200.1523@eggly.anvils
Fixes: b56a2d8af914 ("mm: rid swapoff of quadratic complexity")
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: "Alex Xu (Hello71)" <alex_y_xu@yahoo.ca>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Kelley Nielsen <kelleynnn@gmail.com>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vineeth Pillai <vpillai@digitalocean.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
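[Editorial note: the pattern the patch applies can be shown in a minimal,
self-contained userspace C sketch: hoist the "all work done" and
"interrupted" tests into the loop condition itself, so the scan stops on
the first iteration after either becomes true.  Here stop_requested stands
in for signal_pending() and next_inuse() for find_next_to_unuse(); both
names are illustrative, not kernel API.]

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t stop_requested;	/* stand-in for signal_pending() */

static void on_sigint(int sig)
{
	(void)sig;
	stop_requested = 1;
}

/* Hypothetical stand-in for find_next_to_unuse(): next index, or 0 when done. */
static unsigned int next_inuse(unsigned int inuse_pages, unsigned int i)
{
	return inuse_pages ? i + 1 : 0;
}

int main(void)
{
	unsigned int inuse_pages = 1000;	/* entries still in use */
	unsigned int i = 0;

	signal(SIGINT, on_sigint);

	/*
	 * As in the patched loops: completion (inuse_pages) and
	 * interruption (stop_requested) are tested in the loop condition,
	 * so the scan stops as soon as either holds, not only when the
	 * iterator itself runs out.
	 */
	while (inuse_pages &&
	       !stop_requested &&
	       (i = next_inuse(inuse_pages, i)) != 0)
		inuse_pages--;		/* stand-in for freeing one swap entry */

	if (stop_requested)
		fprintf(stderr, "interrupted, %u entries left\n", inuse_pages);
	else
		printf("done after %u iterations\n", i);
	return stop_requested ? 1 : 0;
}

[Before the patch, the first loop below tested signal_pending() only inside
its body and never tested inuse_pages at all, so a swapoff that had already
freed everything kept scanning the remaining mms.]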
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--	mm/swapfile.c	19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index bf4ef2e40f23..71383625a582 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2051,11 +2051,9 @@ retry:
 
 	spin_lock(&mmlist_lock);
 	p = &init_mm.mmlist;
-	while ((p = p->next) != &init_mm.mmlist) {
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			break;
-		}
+	while (si->inuse_pages &&
+	       !signal_pending(current) &&
+	       (p = p->next) != &init_mm.mmlist) {
 
 		mm = list_entry(p, struct mm_struct, mmlist);
 		if (!mmget_not_zero(mm))
@@ -2082,7 +2080,9 @@ retry:
 	mmput(prev_mm);
 
 	i = 0;
-	while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
+	while (si->inuse_pages &&
+	       !signal_pending(current) &&
+	       (i = find_next_to_unuse(si, i, frontswap)) != 0) {
 
 		entry = swp_entry(type, i);
 		page = find_get_page(swap_address_space(entry), i);
@@ -2123,8 +2123,11 @@ retry:
 	 * separate lists, and wait for those lists to be emptied; but it's
 	 * easier and more robust (though cpu-intensive) just to keep retrying.
 	 */
-	if (si->inuse_pages)
-		goto retry;
+	if (si->inuse_pages) {
+		if (!signal_pending(current))
+			goto retry;
+		retval = -EINTR;
+	}
 out:
 	return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval;
 }