Diffstat (limited to 'mm/readahead.c')
-rw-r--r--	mm/readahead.c	21
1 file changed, 2 insertions, 19 deletions
diff --git a/mm/readahead.c b/mm/readahead.c
index 9ea5125a0dce..d92a5e8d89c4 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -549,7 +549,6 @@ static void ondemand_readahead(struct readahead_control *ractl,
 	struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
 	struct file_ra_state *ra = ractl->ra;
 	unsigned long max_pages = ra->ra_pages;
-	unsigned long add_pages;
 	pgoff_t index = readahead_index(ractl);
 	pgoff_t expected, prev_index;
 	unsigned int order = folio ? folio_order(folio) : 0;
@@ -638,26 +637,10 @@ static void ondemand_readahead(struct readahead_control *ractl,
 initial_readahead:
 	ra->start = index;
 	ra->size = get_init_ra_size(req_size, max_pages);
-	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
+	ra->async_size = ra->size > req_size ? ra->size - req_size :
+			ra->size >> 1;
 
 readit:
-	/*
-	 * Will this read hit the readahead marker made by itself?
-	 * If so, trigger the readahead marker hit now, and merge
-	 * the resulted next readahead window into the current one.
-	 * Take care of maximum IO pages as above.
-	 */
-	if (index == ra->start && ra->size == ra->async_size) {
-		add_pages = get_next_ra_size(ra, max_pages);
-		if (ra->size + add_pages <= max_pages) {
-			ra->async_size = add_pages;
-			ra->size += add_pages;
-		} else {
-			ra->size = max_pages;
-			ra->async_size = max_pages >> 1;
-		}
-	}
-
 	ractl->_index = ra->start;
 	page_cache_ra_order(ractl, ra, order);
 }
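
For readers skimming the hunks above: the initial window is still sized by get_init_ra_size(), but when the window does not exceed the request, the async marker now covers half the window (ra->size >> 1) rather than the whole window, and the block that merged a self-hit readahead marker into the current window is removed along with its add_pages variable. Below is a minimal userspace sketch of the new initial sizing. The get_init_ra_size() stand-in is an assumption purely for illustration (the real helper is defined elsewhere in mm/readahead.c); only the async_size expression mirrors the hunk.

#include <stdio.h>

struct file_ra_state {
	unsigned long start;
	unsigned long size;
	unsigned long async_size;
};

/*
 * Assumed stand-in for the kernel's get_init_ra_size(): round the request
 * up to a power of two and cap it at the maximum window.  Illustrative only.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = 1;

	while (newsize < size)
		newsize <<= 1;
	return newsize < max ? newsize : max;
}

/* Mirrors the initial_readahead sizing as it reads after this patch. */
static void init_window(struct file_ra_state *ra, unsigned long index,
			unsigned long req_size, unsigned long max_pages)
{
	ra->start = index;
	ra->size = get_init_ra_size(req_size, max_pages);
	/* Async marker: leftover pages, or half the window if none are left. */
	ra->async_size = ra->size > req_size ? ra->size - req_size :
					       ra->size >> 1;
}

int main(void)
{
	struct file_ra_state ra;

	init_window(&ra, 0, 32, 128);
	printf("size=%lu async_size=%lu\n", ra.size, ra.async_size);

	init_window(&ra, 0, 5, 128);
	printf("size=%lu async_size=%lu\n", ra.size, ra.async_size);
	return 0;
}

With these inputs the sketch prints "size=32 async_size=16" and "size=8 async_size=3", exercising both arms of the ternary: the half-window fallback and the leftover-pages case.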