summary refs log tree commit diff
path: root/system/easy-kernel/0404-page-cache-not-found.patch
diff options
context:
space:
mode:
Diffstat (limited to 'system/easy-kernel/0404-page-cache-not-found.patch')
-rw-r--r-- system/easy-kernel/0404-page-cache-not-found.patch | 48
1 file changed, 48 insertions, 0 deletions
diff --git a/system/easy-kernel/0404-page-cache-not-found.patch b/system/easy-kernel/0404-page-cache-not-found.patch
new file mode 100644
index 000000000..eaa82b64c
--- /dev/null
+++ b/system/easy-kernel/0404-page-cache-not-found.patch
@@ -0,0 +1,48 @@
+From: Yin Fengwei <fengwei.yin@intel.com>
+Date: Wed, 28 Jun 2023 12:43:03 +0800
+Subject: [PATCH 06/16] readahead: correct the start and size in
+ ondemand_readahead()
+
+Commit 9425c591e06a ("page cache: fix page_cache_next/prev_miss off by
+one") updated the page_cache_next_miss() to return the index beyond range.
+
+But it breaks the start/size of ra in ondemand_readahead() because the
+off-by-one is accumulated into readahead_index. As a consequence, the
+best readahead order is not picked.
+
+Link: https://lkml.kernel.org/r/20230628044303.1412624-1-fengwei.yin@intel.com
+Fixes: 9425c591e06a ("page cache: fix page_cache_next/prev_miss off by one")
+Reported-by: kernel test robot <oliver.sang@intel.com>
+Closes: https://lore.kernel.org/oe-lkp/202306211346.1e9ff03e-oliver.sang@intel.com
+Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
+Cc: Ackerley Tng <ackerleytng@google.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+ mm/readahead.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/mm/readahead.c b/mm/readahead.c
+index 6925e6959..1c1d26ad0 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -600,9 +600,17 @@ static void ondemand_readahead(struct readahead_control *ractl,
+ max_pages);
+ rcu_read_unlock();
+
+- if (!start || start - index > max_pages)
++ if (!start || start - index - 1 > max_pages)
+ return;
+
++ /*
++ * If no gaps in the range, page_cache_next_miss() returns
++ * index beyond range. Adjust it back to make sure
++ * ractl->_index is updated correctly later.
++ */
++ if ((start - index - 1) == max_pages)
++ start--;
++
+ ra->start = start;
+ ra->size = start - index; /* old async_size */
+ ra->size += req_size;