author     Wu Fengguang <fengguang.wu@intel.com>  2011-05-25 02:12:28 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-05-25 17:39:26 +0200
commit     275b12bf5486f6f531111fd3d7dbbf01df427cfe (patch)
tree       ebed0109eaa3e4a015bc1653f4d933f2e84c794f /mm
parent     vmscan: change shrinker API by passing shrink_control struct (diff)
readahead: return early when readahead is disabled
Reduce readahead overheads by returning early in do_sync_mmap_readahead().

tmpfs has ra_pages=0 and it can page fault really fast (not constrained by
IO if not swapping).

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Tested-by: Tim Chen <tim.c.chen@intel.com>
Reported-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
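To illustrate the control flow this patch establishes, here is a minimal, self-contained C sketch (not the kernel source; the struct and function names below are simplified stand-ins invented for illustration): when ra_pages is 0, the read-around setup is skipped entirely, which is the fast path tmpfs mappings now take.

    #include <stdio.h>

    /* Hypothetical, simplified stand-in for struct file_ra_state. */
    struct ra_state_sketch {
            unsigned long ra_pages;  /* 0 means readahead is disabled, as on tmpfs */
            unsigned long start;
            unsigned long size;
    };

    /* Sketch of the post-patch flow: bail out before any read-around work. */
    static void sync_mmap_readahead_sketch(struct ra_state_sketch *ra,
                                           unsigned long offset)
    {
            if (!ra->ra_pages)
                    return;  /* readahead disabled: no window math, no submission */

            /* mmap read-around (now unconditional, thanks to the early return) */
            ra->start = offset > ra->ra_pages / 2 ? offset - ra->ra_pages / 2 : 0;
            ra->size  = ra->ra_pages;
            printf("submit read-around: start=%lu size=%lu\n", ra->start, ra->size);
    }

    int main(void)
    {
            struct ra_state_sketch tmpfs_like = { .ra_pages = 0 };
            struct ra_state_sketch disk_like  = { .ra_pages = 32 };

            sync_mmap_readahead_sketch(&tmpfs_like, 100);  /* returns immediately */
            sync_mmap_readahead_sketch(&disk_like, 100);   /* does the read-around */
            return 0;
    }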
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c  12
1 files changed, 6 insertions, 6 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 88354ae0b1fd..c974a2863897 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1556,6 +1556,8 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
 	/* If we don't want any read-ahead, don't bother */
 	if (VM_RandomReadHint(vma))
 		return;
+	if (!ra->ra_pages)
+		return;
 
 	if (VM_SequentialReadHint(vma) ||
 			offset - 1 == (ra->prev_pos >> PAGE_CACHE_SHIFT)) {
@@ -1578,12 +1580,10 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
 	 * mmap read-around
 	 */
 	ra_pages = max_sane_readahead(ra->ra_pages);
-	if (ra_pages) {
-		ra->start = max_t(long, 0, offset - ra_pages/2);
-		ra->size = ra_pages;
-		ra->async_size = 0;
-		ra_submit(ra, mapping, file);
-	}
+	ra->start = max_t(long, 0, offset - ra_pages / 2);
+	ra->size = ra_pages;
+	ra->async_size = 0;
+	ra_submit(ra, mapping, file);
 }
 
 /*