author		Nick Piggin <npiggin@suse.de>	2007-10-16 10:24:42 +0200
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 18:42:53 +0200
commit		b55ed816235cf41c29159d22a4cdeec7deb5821c
tree		73245c1da2c5352c526d4d25f02e69943b4b0b15 /mm
parent		mm: improve find_lock_page
mm: clarify __add_to_swap_cache locking
__add_to_swap_cache unconditionally sets the page locked, which can be a bit alarming to the unsuspecting reader: in the code paths where the page is visible to other CPUs, the page should be (and is) already locked.

Instead, just add a check to ensure the page is locked here, and teach the one path relying on the old behaviour to call SetPageLocked itself.

[hugh@veritas.com: locking fix]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
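The locking contract this patch establishes can be sketched in plain userspace C. This is an illustrative analogy, not kernel code: fake_page, fake_add_to_swap_cache and fake_caller are hypothetical stand-ins for struct page, __add_to_swap_cache and the patched add_to_swap_cache, and the fail_insert flag merely simulates a radix-tree insertion failure.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	bool locked;
	bool swapcache;
};

/* Mirrors __add_to_swap_cache() after the patch: the page must already
 * be locked by the caller; this helper no longer locks it itself. */
static int fake_add_to_swap_cache(struct fake_page *page, bool fail_insert)
{
	assert(page->locked);      /* the new BUG_ON(!PageLocked(page)) */
	assert(!page->swapcache);  /* BUG_ON(PageSwapCache(page)) */
	if (fail_insert)
		return -1;         /* e.g. the radix-tree insert failed */
	page->swapcache = true;    /* note: no SetPageLocked() here any more */
	return 0;
}

/* Mirrors the patched add_to_swap_cache(): it locks the freshly allocated
 * page itself and unlocks it again if the insertion fails. */
static int fake_caller(struct fake_page *page, bool fail_insert)
{
	int error;

	page->locked = true;               /* SetPageLocked(page) */
	error = fake_add_to_swap_cache(page, fail_insert);
	if (error)
		page->locked = false;      /* ClearPageLocked(page) */
	return error;
}

int main(void)
{
	struct fake_page ok = { false, false };
	struct fake_page bad = { false, false };

	/* Success: the page ends up locked and in the (fake) swap cache. */
	assert(fake_caller(&ok, false) == 0 && ok.locked && ok.swapcache);

	/* Failure: the caller undoes its own locking. */
	assert(fake_caller(&bad, true) != 0 && !bad.locked && !bad.swapcache);

	puts("locking contract holds");
	return 0;
}

The point mirrored here is that the helper now asserts its precondition instead of establishing it: callers that already hold the page lock (where the page is visible to other CPUs) are checked rather than re-locked, while the one path that relied on the old behaviour takes, and on failure drops, the lock itself.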
Diffstat (limited to 'mm')
-rw-r--r--	mm/swap_state.c	5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 67daecb6031a..b52635601dfe 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -74,6 +74,7 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
 {
 	int error;
 
+	BUG_ON(!PageLocked(page));
 	BUG_ON(PageSwapCache(page));
 	BUG_ON(PagePrivate(page));
 	error = radix_tree_preload(gfp_mask);
@@ -83,7 +84,6 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
 						entry.val, page);
 		if (!error) {
 			page_cache_get(page);
-			SetPageLocked(page);
 			SetPageSwapCache(page);
 			set_page_private(page, entry.val);
 			total_swapcache_pages++;
@@ -99,15 +99,18 @@ static int add_to_swap_cache(struct page *page, swp_entry_t entry)
 {
 	int error;
 
+	BUG_ON(PageLocked(page));
 	if (!swap_duplicate(entry)) {
 		INC_CACHE_INFO(noent_race);
 		return -ENOENT;
 	}
+	SetPageLocked(page);
 	error = __add_to_swap_cache(page, entry, GFP_KERNEL);
 	/*
 	 * Anon pages are already on the LRU, we don't run lru_cache_add here.
 	 */
 	if (error) {
+		ClearPageLocked(page);
 		swap_free(entry);
 		if (error == -EEXIST)
 			INC_CACHE_INFO(exist_race);