Diffstat (limited to 'mm'):

 mm/filemap.c   |  8 ++++++++
 mm/readahead.c | 20 ++++++++++++++------
 2 files changed, 22 insertions(+), 6 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 1a6beaf69f49..e4101b5bfa82 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2199,6 +2199,14 @@ ssize_t generic_file_buffered_read(struct kiocb *iocb,
 	last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
 	offset = *ppos & ~PAGE_MASK;
 
+	/*
+	 * If we've already successfully copied some data, then we
+	 * can no longer safely return -EIOCBQUEUED. Hence mark
+	 * an async read NOWAIT at that point.
+	 */
+	if (written && (iocb->ki_flags & IOCB_WAITQ))
+		iocb->ki_flags |= IOCB_NOWAIT;
+
 	for (;;) {
 		struct page *page;
 		pgoff_t end_index;
diff --git a/mm/readahead.c b/mm/readahead.c
index c6ffb76827da..c5b0457415be 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -552,15 +552,23 @@ readit:
 void page_cache_sync_ra(struct readahead_control *ractl,
 		struct file_ra_state *ra, unsigned long req_count)
 {
-	/* no read-ahead */
-	if (!ra->ra_pages)
-		return;
+	bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);
 
-	if (blk_cgroup_congested())
-		return;
+	/*
+	 * Even if read-ahead is disabled, issue this request as read-ahead
+	 * as we'll need it to satisfy the requested range. The forced
+	 * read-ahead will do the right thing and limit the read to just the
+	 * requested range, which we'll set to 1 page for this case.
+	 */
+	if (!ra->ra_pages || blk_cgroup_congested()) {
+		if (!ractl->file)
+			return;
+		req_count = 1;
+		do_forced_ra = true;
+	}
 
 	/* be dumb */
-	if (ractl->file && (ractl->file->f_mode & FMODE_RANDOM)) {
+	if (do_forced_ra) {
 		force_page_cache_ra(ractl, ra, req_count);
 		return;
 	}
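
A note on the mm/filemap.c hunk: generic_file_buffered_read() can be entered
with `written` already nonzero (bytes copied on an earlier pass), and
returning -EIOCBQUEUED at that point would throw the partial result away,
since the caller only learns that the iocb was queued. Flipping the
IOCB_WAITQ read to IOCB_NOWAIT makes it return the short count instead. The
userspace sketch below models that control flow only; struct mock_iocb,
mock_buffered_read() and the flag values are invented for illustration and
are not the kernel's definitions.

	#include <errno.h>
	#include <stdio.h>

	#define IOCB_NOWAIT	(1 << 0)	/* illustrative values, not the kernel's */
	#define IOCB_WAITQ	(1 << 1)
	#define EIOCBQUEUED	529		/* kernel-internal "iocb queued" errno */

	struct mock_iocb {
		int ki_flags;
	};

	/*
	 * Models the buffered-read tail for the case where the next page
	 * is not yet in the page cache.
	 */
	static long mock_buffered_read(struct mock_iocb *iocb, long written)
	{
		/*
		 * The filemap.c hunk above: once any data has been copied,
		 * an async (IOCB_WAITQ) read is downgraded to NOWAIT so it
		 * cannot return -EIOCBQUEUED and lose the partial result.
		 */
		if (written && (iocb->ki_flags & IOCB_WAITQ))
			iocb->ki_flags |= IOCB_NOWAIT;

		if (iocb->ki_flags & IOCB_NOWAIT)
			return written ? written : -EAGAIN;
		if (iocb->ki_flags & IOCB_WAITQ)
			return -EIOCBQUEUED;	/* only reached with written == 0 */

		/* A blocking read would lock the page and do the I/O here. */
		return written + 4096;
	}

	int main(void)
	{
		struct mock_iocb iocb = { .ki_flags = IOCB_WAITQ };

		/* Nothing copied yet: queueing the iocb is still safe (-529). */
		printf("ret = %ld\n", mock_buffered_read(&iocb, 0));

		/* 4096 bytes already copied: return the short count instead. */
		iocb.ki_flags = IOCB_WAITQ;
		printf("ret = %ld\n", mock_buffered_read(&iocb, 4096));
		return 0;
	}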
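
The mm/readahead.c hunk changes what happens when read-ahead is disabled
(ra->ra_pages == 0) or the blk cgroup is congested: instead of returning
without issuing any I/O, page_cache_sync_ra() now downgrades to a forced
one-page read-ahead, so the requested range still gets read and an async
reader can make progress. A compilable userspace sketch of the new decision
flow follows; struct mock_ractl, sync_ra() and the printf() calls are
stand-ins invented for illustration, not the kernel's interfaces.

	#include <stdbool.h>
	#include <stdio.h>

	struct mock_ractl {
		bool has_file;		/* stands in for ractl->file */
		bool fmode_random;	/* stands in for FMODE_RANDOM */
	};

	/* ra_pages and cgroup_congested stand in for ra->ra_pages and
	 * blk_cgroup_congested(). */
	static void sync_ra(struct mock_ractl *ractl, unsigned long ra_pages,
			    bool cgroup_congested, unsigned long req_count)
	{
		bool do_forced_ra = ractl->has_file && ractl->fmode_random;

		/*
		 * Old behavior: return here, issuing no I/O at all.
		 * New behavior: fall back to a 1-page forced read-ahead
		 * limited to the requested range.
		 */
		if (!ra_pages || cgroup_congested) {
			if (!ractl->has_file)
				return;
			req_count = 1;
			do_forced_ra = true;
		}

		if (do_forced_ra) {
			printf("force_page_cache_ra() for %lu page(s)\n", req_count);
			return;
		}
		printf("normal read-ahead for %lu page(s)\n", req_count);
	}

	int main(void)
	{
		struct mock_ractl ractl = { .has_file = true, .fmode_random = false };

		sync_ra(&ractl, 32, false, 16);	/* normal read-ahead, 16 pages */
		sync_ra(&ractl, 0, false, 16);	/* ra disabled: forced, 1 page */
		sync_ra(&ractl, 32, true, 16);	/* cgroup congested: forced, 1 page */
		return 0;
	}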