Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	15
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 24b7ac2bc36b..31f698862420 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -927,7 +927,7 @@ retry:
 				break;
 			}
 
-		done_index = page->index + 1;
+		done_index = page->index;
 
 		lock_page(page);
@@ -977,6 +977,7 @@ continue_unlock:
 					 * not be suitable for data integrity
 					 * writeout).
 					 */
+					done_index = page->index + 1;
 					done = 1;
 					break;
 				}
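
To make the effect of these two write_cache_pages() hunks concrete: done_index now records the page currently being worked on, and only a failed writeout attempt (in non-data-integrity mode) advances it past that page, so a page the walk stops at before writing is revisited on the next pass. The following is a minimal userspace sketch of that resume bookkeeping, not kernel code; NPAGES, try_writepage(), and the failing index are illustrative assumptions.

#include <stdio.h>

#define NPAGES 8

/* Pretend the writeout attempt for page 5 fails. */
static int try_writepage(int index)
{
	return index == 5 ? -1 : 0;
}

int main(void)
{
	int done_index = 0;

	for (int index = 0; index < NPAGES; index++) {
		/*
		 * Record the page about to be worked on.  If the walk
		 * stops before the attempt (e.g. nr_to_write is
		 * exhausted), the next pass resumes here instead of
		 * skipping a page that was never written.
		 */
		done_index = index;

		if (try_writepage(index) < 0) {
			/*
			 * The attempt was made and failed; resume after
			 * this page, hence the "+ 1" moved into the
			 * error path above.
			 */
			done_index = index + 1;
			break;
		}
	}

	printf("next pass resumes at page %d\n", done_index);
	return 0;
}

Built with any C99 compiler, this prints "next pass resumes at page 6": the failed page is stepped over, while stopping before an attempt would leave done_index pointing at the still-unwritten page.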
@@ -1217,6 +1218,17 @@ int set_page_dirty(struct page *page)
 
 	if (likely(mapping)) {
 		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
+		/*
+		 * readahead/lru_deactivate_page could leave
+		 * PG_readahead/PG_reclaim set due to a race with end_page_writeback.
+		 * For readahead: if the page is written, the flag is reset,
+		 * so there is no problem.
+		 * For lru_deactivate_page: if the page is redirtied, the flag
+		 * is reset, so no problem there either; but if the page is then
+		 * used by readahead, the stale flag confuses readahead and makes
+		 * it restart the window size ramp-up. That is a trivial problem.
+		 */
+		ClearPageReclaim(page);
 #ifdef CONFIG_BLOCK
 		if (!spd)
 			spd = __set_page_dirty_buffers;
@@ -1272,7 +1284,6 @@ int clear_page_dirty_for_io(struct page *page)
 
 	BUG_ON(!PageLocked(page));
 
-	ClearPageReclaim(page);
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		/*
 		 * Yes, Virginia, this is indeed insane.
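
The last two hunks relocate ClearPageReclaim() from clear_page_dirty_for_io() into set_page_dirty(), so that a redirtied page sheds any stale PG_reclaim left behind when lru_deactivate_page() races with end_page_writeback(). Below is a minimal userspace model of that flag lifecycle, not kernel code; the struct page and both functions are illustrative stand-ins, with a single bool standing in for the flag bit that PG_reclaim shares with PG_readahead.

#include <stdbool.h>
#include <stdio.h>

struct page {
	bool dirty;
	bool reclaim;	/* stands in for the shared PG_reclaim/PG_readahead bit */
};

/* Models set_page_dirty() after the patch: redirtying a page
 * drops any stale PG_reclaim before the flag can be misread. */
static void set_page_dirty(struct page *page)
{
	page->reclaim = false;
	page->dirty = true;
}

int main(void)
{
	/* lru_deactivate_page() set PG_reclaim, the page raced with
	 * end_page_writeback(), and it is now being redirtied. */
	struct page page = { .dirty = false, .reclaim = true };

	set_page_dirty(&page);

	/* Prints 0: the redirty path clears the bit, so it cannot
	 * later be misread as PG_readahead and restart the readahead
	 * window ramp-up that the added comment describes. */
	printf("reclaim flag after redirty: %d\n", page.reclaim);
	return 0;
}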