author | Jan Kara <jack@suse.cz> | 2012-05-03 14:47:58 +0200
---|---|---
committer | Fengguang Wu <fengguang.wu@intel.com> | 2012-05-06 07:43:39 +0200
commit | ccb26b5a65867839d95156e02ea4861f64a8cbf3 |
tree | 36ee626d4525171d6a8bb2e4582bd9e0a811bb2d /fs/fs-writeback.c |
parent | writeback: Move I_DIRTY_PAGES handling |
writeback: Separate inode requeueing after writeback
Move the requeueing of an inode after it has been written out into a
separate function.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Fengguang Wu <fengguang.wu@intel.com>
Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r-- | fs/fs-writeback.c | 102 |
1 file changed, 55 insertions(+), 47 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 3804a10f2be7..5d3de002cb8e 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -345,6 +345,60 @@ static void inode_wait_for_writeback(struct inode *inode,
 }
 
 /*
+ * Find proper writeback list for the inode depending on its current state and
+ * possibly also change of its state while we were doing writeback. Here we
+ * handle things such as livelock prevention or fairness of writeback among
+ * inodes. This function can be called only by flusher thread - noone else
+ * processes all inodes in writeback lists and requeueing inodes behind flusher
+ * thread's back can have unexpected consequences.
+ */
+static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
+			  struct writeback_control *wbc)
+{
+	if (inode->i_state & I_FREEING)
+		return;
+
+	/*
+	 * Sync livelock prevention. Each inode is tagged and synced in one
+	 * shot. If still dirty, it will be redirty_tail()'ed below. Update
+	 * the dirty time to prevent enqueue and sync it again.
+	 */
+	if ((inode->i_state & I_DIRTY) &&
+	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
+		inode->dirtied_when = jiffies;
+
+	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
+		/*
+		 * We didn't write back all the pages. nfs_writepages()
+		 * sometimes bales out without doing anything.
+		 */
+		if (wbc->nr_to_write <= 0) {
+			/* Slice used up. Queue for next turn. */
+			requeue_io(inode, wb);
+		} else {
+			/*
+			 * Writeback blocked by something other than
+			 * congestion. Delay the inode for some time to
+			 * avoid spinning on the CPU (100% iowait)
+			 * retrying writeback of the dirty page/inode
+			 * that cannot be performed immediately.
+			 */
+			redirty_tail(inode, wb);
+		}
+	} else if (inode->i_state & I_DIRTY) {
+		/*
+		 * Filesystems can dirty the inode during writeback operations,
+		 * such as delayed allocation during submission or metadata
+		 * updates after data IO completion.
+		 */
+		redirty_tail(inode, wb);
+	} else {
+		/* The inode is clean. Remove from writeback lists. */
+		list_del_init(&inode->i_wb_list);
+	}
+}
+
+/*
  * Write out an inode's dirty pages.  Called under wb->list_lock and
  * inode->i_lock.  Either the caller has an active reference on the inode or
  * the inode has I_WILL_FREE set.
@@ -422,53 +476,7 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
 
 	spin_lock(&wb->list_lock);
 	spin_lock(&inode->i_lock);
-	if (!(inode->i_state & I_FREEING)) {
-		/*
-		 * Sync livelock prevention. Each inode is tagged and synced in
-		 * one shot. If still dirty, it will be redirty_tail()'ed below.
-		 * Update the dirty time to prevent enqueue and sync it again.
-		 */
-		if ((inode->i_state & I_DIRTY) &&
-		    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
-			inode->dirtied_when = jiffies;
-
-		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
-			/*
-			 * We didn't write back all the pages. nfs_writepages()
-			 * sometimes bales out without doing anything.
-			 */
-			if (wbc->nr_to_write <= 0) {
-				/*
-				 * slice used up: queue for next turn
-				 */
-				requeue_io(inode, wb);
-			} else {
-				/*
-				 * Writeback blocked by something other than
-				 * congestion. Delay the inode for some time to
-				 * avoid spinning on the CPU (100% iowait)
-				 * retrying writeback of the dirty page/inode
-				 * that cannot be performed immediately.
-				 */
-				redirty_tail(inode, wb);
-			}
-		} else if (inode->i_state & I_DIRTY) {
-			/*
-			 * Filesystems can dirty the inode during writeback
-			 * operations, such as delayed allocation during
-			 * submission or metadata updates after data IO
-			 * completion.
-			 */
-			redirty_tail(inode, wb);
-		} else {
-			/*
-			 * The inode is clean. At this point we either have
-			 * a reference to the inode or it's on it's way out.
-			 * No need to add it back to the LRU.
-			 */
-			list_del_init(&inode->i_wb_list);
-		}
-	}
+	requeue_inode(inode, wb, wbc);
 	inode_sync_complete(inode);
 	trace_writeback_single_inode(inode, wbc, nr_to_write);
 	return ret;
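As an aside for readers tracing the new control flow: the decision tree that requeue_inode() encodes can be modelled as a small standalone C program. The sketch below is an illustration only; the names fake_inode, wb_action and pick_action are hypothetical stand-ins for the kernel's inode state flags, page-cache dirty tag and writeback list helpers, not kernel API.

```c
/*
 * Illustrative userspace model of the requeue_inode() decision tree above.
 * All names here (fake_inode, wb_action, pick_action) are hypothetical and
 * do not exist in the kernel; the real code operates on struct inode,
 * bdi_writeback lists and the page cache dirty tag.
 */
#include <stdbool.h>
#include <stdio.h>

enum wb_action {
	WB_NONE,		/* inode is being freed, leave it alone */
	WB_REQUEUE_IO,		/* slice used up: queue for the next turn */
	WB_REDIRTY_TAIL,	/* blocked or re-dirtied: delay the inode */
	WB_REMOVE_FROM_LISTS,	/* clean: drop from the writeback lists */
};

struct fake_inode {
	bool freeing;		/* models I_FREEING */
	bool metadata_dirty;	/* models I_DIRTY */
	bool has_dirty_pages;	/* models PAGECACHE_TAG_DIRTY on the mapping */
};

/* Mirrors the branch structure of requeue_inode(). */
static enum wb_action pick_action(const struct fake_inode *inode,
				  long nr_to_write)
{
	if (inode->freeing)
		return WB_NONE;
	if (inode->has_dirty_pages)
		return nr_to_write <= 0 ? WB_REQUEUE_IO : WB_REDIRTY_TAIL;
	if (inode->metadata_dirty)
		return WB_REDIRTY_TAIL;
	return WB_REMOVE_FROM_LISTS;
}

int main(void)
{
	struct fake_inode inode = { .has_dirty_pages = true };

	/* Slice exhausted: the inode simply waits for the next turn. */
	printf("nr_to_write=0  -> %d\n", pick_action(&inode, 0));
	/* Budget left but pages still dirty: writeback was blocked, delay it. */
	printf("nr_to_write=16 -> %d\n", pick_action(&inode, 16));
	/* Clean inode: it leaves the writeback lists entirely. */
	inode.has_dirty_pages = false;
	printf("clean          -> %d\n", pick_action(&inode, 16));
	return 0;
}
```

The three outcomes correspond to requeue_io() (time slice exhausted), redirty_tail() (writeback blocked or the inode re-dirtied), and removing a clean inode from the writeback lists.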