author     Linus Torvalds <torvalds@linux-foundation.org>  2023-05-04 22:21:16 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-05-04 22:21:16 +0200
commit     a1fd058b07d58c3372c1aba25dd65f1c9c9b65f1
tree       b271ac1faa0c2c2dd3f3202a09b84d4f98fe3ceb
parent     Merge tag 'mm-stable-2023-05-03-16-22' of git://git.kernel.org/pub/scm/linux/...
parent     mm: change per-VMA lock statistics to be disabled by default
Merge tag 'mm-hotfixes-stable-2023-05-03-16-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull hotfixes from Andrew Morton:
 "Five hotfixes. Three are cc:stable, two for this -rc cycle"

* tag 'mm-hotfixes-stable-2023-05-03-16-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm: change per-VMA lock statistics to be disabled by default
  MAINTAINERS: update Michal Simek's email
  mm/mempolicy: correctly update prev when policy is equal on mbind
  relayfs: fix out-of-bounds access in relay_file_read
  kasan: hw_tags: avoid invalid virt_to_page()
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig.debug    10
-rw-r--r--  mm/kasan/hw_tags.c   4
-rw-r--r--  mm/mempolicy.c       4
3 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 6dae63b46368..a925415b4d10 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -274,6 +274,12 @@ config DEBUG_KMEMLEAK_AUTO_SCAN
 config PER_VMA_LOCK_STATS
 	bool "Statistics for per-vma locks"
 	depends on PER_VMA_LOCK
-	default y
 	help
-	  Statistics for per-vma locks.
+	  Say Y here to enable success, retry and failure counters of page
+	  faults handled under protection of per-vma locks. When enabled, the
+	  counters are exposed in /proc/vmstat. This information is useful for
+	  kernel developers to evaluate effectiveness of per-vma locks and to
+	  identify pathological cases. Counting these events introduces a small
+	  overhead in the page fault path.
+
+	  If in doubt, say N.
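
The new help text points readers at /proc/vmstat for the counters that PER_VMA_LOCK_STATS enables. As a rough userspace sketch (not part of this patch), something like the program below would dump them; the "vma_lock" counter-name prefix is an assumption about how the vm_event names are spelled, not something this patch defines.

/*
 * Minimal userspace sketch: print the per-VMA lock counters from
 * /proc/vmstat on a kernel built with PER_VMA_LOCK_STATS=y.
 * The "vma_lock" prefix is an assumption; adjust it to whatever
 * counter names your kernel actually exports.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("fopen /proc/vmstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "vma_lock", strlen("vma_lock")))
			fputs(line, stdout);	/* e.g. "vma_lock_success 12345" */
	}
	fclose(f);
	return 0;
}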
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index f98b9f4d9d3e..06141bbc1e51 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -285,7 +285,7 @@ static void init_vmalloc_pages(const void *start, unsigned long size)
 	const void *addr;
 
 	for (addr = start; addr < start + size; addr += PAGE_SIZE) {
-		struct page *page = virt_to_page(addr);
+		struct page *page = vmalloc_to_page(addr);
 
 		clear_highpage_kasan_tagged(page);
 	}
@@ -297,7 +297,7 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
 	u8 tag;
 	unsigned long redzone_start, redzone_size;
 
-	if (!kasan_vmalloc_enabled() || !is_vmalloc_or_module_addr(start)) {
+	if (!kasan_vmalloc_enabled()) {
 		if (flags & KASAN_VMALLOC_INIT)
 			init_vmalloc_pages(start, size);
 		return (void *)start;
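
For context on the hunks above: virt_to_page() is only valid for addresses in the kernel's linear map, while init_vmalloc_pages() walks a vmalloc range, whose pages must be looked up through the page tables with vmalloc_to_page(). A purely illustrative helper (not part of this patch; the name is hypothetical) would pick the translation like this:

/*
 * Illustrative sketch only: choose the right page lookup for an address.
 * virt_to_page() works for linear-mapped memory; vmalloc addresses need
 * vmalloc_to_page(), which walks the page tables.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static struct page *any_virt_to_page(const void *addr)
{
	if (is_vmalloc_addr(addr))		/* address in the vmalloc range */
		return vmalloc_to_page(addr);
	return virt_to_page(addr);		/* linear-mapped memory */
}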
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 2068b594dc88..1756389a0609 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -808,8 +808,10 @@ static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		vmstart = vma->vm_start;
 	}
 
-	if (mpol_equal(vma_policy(vma), new_pol))
+	if (mpol_equal(vma_policy(vma), new_pol)) {
+		*prev = vma;
 		return 0;
+	}
 
 	pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
 	merged = vma_merge(vmi, vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags,