author    Huang Ying <ying.huang@intel.com>  2018-10-27 00:03:53 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-10-27 01:25:19 +0200
commit    979aafa5919b106a65646ac5f73f8354c0164e63 (patch)
tree      cd338b6a7f932b757020704eefdf1c3e66cebf7e /mm/swapfile.c
parent    mm/swapfile.c: call free_swap_slot() in __swap_entry_free() (diff)
mm/swapfile.c: clear si->swap_map[] in swap_free_cluster()
si->swap_map[] of the swap entries in a cluster needs to be cleared when the cluster is freed.  Previously this was done in the caller of swap_free_cluster(), which causes code duplication (one user now, more will be added later) and an unnecessary extra lock/unlock of the cluster.  In this patch, the clearing code is moved into swap_free_cluster() to avoid these downsides.

Link: http://lkml.kernel.org/r/20180827075535.17406-4-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Shaohua Li <shli@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
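For reference, with this patch applied swap_free_cluster() does the clearing itself. Below is a minimal sketch of the resulting function assembled from the first hunk; the offset computation and the trailing swap_range_free() call are assumed to match the surrounding mm/swapfile.c code of this period and are not part of the patch itself:

static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
{
	/* first swap offset covered by this cluster (assumed from context) */
	unsigned long offset = idx * SWAPFILE_CLUSTER;
	struct swap_cluster_info *ci;

	ci = lock_cluster(si, offset);
	/* new in this patch: clear the swap_map slots under the cluster lock */
	memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
	cluster_set_count_flag(ci, 0, 0);
	free_cluster(si, idx);
	unlock_cluster(ci);
	/* release the swap range; assumed from the surrounding source */
	swap_range_free(si, offset, SWAPFILE_CLUSTER);
}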
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--  mm/swapfile.c  4
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index eaa5e2e55cab..2681e50592c5 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -932,6 +932,7 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
 	struct swap_cluster_info *ci;
 
 	ci = lock_cluster(si, offset);
+	memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
 	cluster_set_count_flag(ci, 0, 0);
 	free_cluster(si, idx);
 	unlock_cluster(ci);
@@ -1250,9 +1251,6 @@ void put_swap_page(struct page *page, swp_entry_t entry)
 	if (free_entries == SWAPFILE_CLUSTER) {
 		unlock_cluster_or_swap_info(si, ci);
 		spin_lock(&si->lock);
-		ci = lock_cluster(si, offset);
-		memset(map, 0, SWAPFILE_CLUSTER);
-		unlock_cluster(ci);
 		mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
 		swap_free_cluster(si, idx);
 		spin_unlock(&si->lock);
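With the second hunk applied, the cluster-freeing path in put_swap_page() no longer takes the cluster lock or clears the map by hand. A sketch of the resulting fragment (the closing brace is added here for readability and is not part of the hunk):

	if (free_entries == SWAPFILE_CLUSTER) {
		unlock_cluster_or_swap_info(si, ci);
		spin_lock(&si->lock);
		mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
		/* now also clears si->swap_map[] for the whole cluster */
		swap_free_cluster(si, idx);
		spin_unlock(&si->lock);
	}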