path: root/mm/usercopy.c
author     Matthew Wilcox (Oracle) <willy@infradead.org>  2022-01-11 00:15:29 +0100
committer  Kees Cook <keescook@chromium.org>              2022-04-13 21:15:51 +0200
commit     ab502103ae3ce4c0fc393e598455efede3e523c9 (patch)
tree       76cc3109c004c93473f70759b100ac083ff03649 /mm/usercopy.c
parent     mm/usercopy: Detect vmalloc overruns (diff)
mm/usercopy: Detect large folio overruns
Move the compound page overrun detection out of CONFIG_HARDENED_USERCOPY_PAGESPAN and convert it to use folios so it's enabled for more people.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Kees Cook <keescook@chromium.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220110231530.665970-4-willy@infradead.org
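For reference, below is a minimal user-space sketch of the bounds check this patch adds, under stated assumptions: region and region_size are hypothetical stand-ins for the kernel's folio_address() and folio_size(), and only the offset arithmetic mirrors the new branch; it is not the kernel implementation.

/*
 * Hedged sketch of the "does this copy stay inside one large folio?"
 * check added by this patch.  region/region_size are hypothetical
 * stand-ins for folio_address()/folio_size(); only the arithmetic
 * mirrors the patch.
 */
#include <stdbool.h>
#include <stdio.h>

static bool copy_fits_region(const char *ptr, unsigned long n,
                             const char *region, unsigned long region_size)
{
        unsigned long offset = ptr - region;

        /* Equivalent of: if (offset + n > folio_size(folio)) -> abort */
        return offset + n <= region_size;
}

int main(void)
{
        static char region[2 * 1024 * 1024];    /* pretend 2 MiB folio */

        /* 4 KiB copy starting 8 KiB before the end: fits. */
        printf("fits: %d\n", copy_fits_region(region + sizeof(region) - 8192,
                                              4096, region, sizeof(region)));
        /* 16 KiB copy starting 8 KiB before the end: overrun, would abort. */
        printf("fits: %d\n", copy_fits_region(region + sizeof(region) - 8192,
                                              16384, region, sizeof(region)));
        return 0;
}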
Diffstat (limited to 'mm/usercopy.c')
-rw-r--r--  mm/usercopy.c  10
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/mm/usercopy.c b/mm/usercopy.c
index e1e856dca124..9458c2b24b02 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -164,7 +164,6 @@ static inline void check_page_span(const void *ptr, unsigned long n,
 {
 #ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
 	const void *end = ptr + n - 1;
-	struct page *endpage;
 	bool is_reserved, is_cma;
 
 	/*
@@ -195,11 +194,6 @@ static inline void check_page_span(const void *ptr, unsigned long n,
 		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
 		return;
 
-	/* Allow if fully inside the same compound (__GFP_COMP) page. */
-	endpage = virt_to_head_page(end);
-	if (likely(endpage == page))
-		return;
-
 	/*
 	 * Reject if range is entirely either Reserved (i.e. special or
 	 * device memory), or CMA. Otherwise, reject since the object spans
@@ -259,6 +253,10 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 	if (folio_test_slab(folio)) {
 		/* Check slab allocator for flags and size. */
 		__check_heap_object(ptr, n, folio_slab(folio), to_user);
+	} else if (folio_test_large(folio)) {
+		unsigned long offset = ptr - folio_address(folio);
+		if (offset + n > folio_size(folio))
+			usercopy_abort("page alloc", NULL, to_user, offset, n);
 	} else {
 		/* Verify object does not incorrectly span multiple pages. */
 		check_page_span(ptr, n, folio_page(folio, 0), to_user);
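With this hunk applied, check_heap_object() handles slab objects first (via __check_heap_object()), then whole large folios with the new bounds check, and only falls back to check_page_span() for the remaining single-page folios. As a rough worked example, assuming a 2 MiB folio: a copy of n = 8192 bytes that starts 1024 bytes before the end of the folio gives offset + n = 2,096,128 + 8,192 = 2,104,320, which exceeds folio_size() = 2,097,152, so usercopy_abort("page alloc", ...) fires.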