author     Alexander Potapenko <glider@google.com>    2023-04-13 15:12:23 +0200
committer  Andrew Morton <akpm@linux-foundation.org>  2023-04-19 01:30:10 +0200
commit     d905ae2b0f7eaf8fb37febfe4833ccf3f8c1c27a (patch)
tree       a631e3fab4646d3e6a0e45051e6b6ae9b9306dd7 /mm/internal.h
parent     mm: kmsan: apply __must_check to non-void functions (diff)
mm: apply __must_check to vmap_pages_range_noflush()
To prevent errors when vmap_pages_range_noflush() or
__vmap_pages_range_noflush() silently fail (see the link below for an
example), annotate them with __must_check so that the callers do not
unconditionally assume the mapping succeeded.

Link: https://lkml.kernel.org/r/20230413131223.4135168-4-glider@google.com
Signed-off-by: Alexander Potapenko <glider@google.com>
Reported-by: Dipanjan Das <mail.dipanjan.das@gmail.com>
Link: https://lore.kernel.org/linux-mm/CANX2M5ZRrRA64k0hOif02TjmY9kbbO2aCBPyq79es34RXZ=cAw@mail.gmail.com/
Reviewed-by: Marco Elver <elver@google.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
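For context, a minimal caller-side sketch of the pattern the annotation enforces. The wrapper example_map_pages() below is hypothetical (it is not part of this patch and would only make sense inside mm/, where mm/internal.h declares these helpers); the point is that the return value of vmap_pages_range_noflush() is checked and propagated rather than silently dropped, which is what __must_check now flags at build time:

/* Hypothetical sketch, not from this patch: a caller inside mm/ that
 * maps an array of pages and honours the __must_check annotation by
 * propagating the error instead of assuming the mapping succeeded.
 */
static int example_map_pages(unsigned long addr, unsigned long end,
			     pgprot_t prot, struct page **pages)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, PAGE_SHIFT);
	if (err)
		return err;		/* mapping failed, do not touch the range */

	flush_cache_vmap(addr, end);	/* the _noflush variant skips this step */
	return 0;
}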
Diffstat (limited to 'mm/internal.h')
-rw-r--r--  mm/internal.h  10
1 file changed, 5 insertions, 5 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 92ddd3a05b74..6483db57a31f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -885,7 +885,7 @@ size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
  */
 #ifdef CONFIG_MMU
 void __init vmalloc_init(void);
-int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 		pgprot_t prot, struct page **pages, unsigned int page_shift);
 #else
 static inline void vmalloc_init(void)
@@ -893,16 +893,16 @@ static inline void vmalloc_init(void)
 }
 static inline
-int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 		pgprot_t prot, struct page **pages, unsigned int page_shift)
 {
 	return -EINVAL;
 }
 #endif
-int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
-		pgprot_t prot, struct page **pages,
-		unsigned int page_shift);
+int __must_check __vmap_pages_range_noflush(unsigned long addr,
+		unsigned long end, pgprot_t prot,
+		struct page **pages, unsigned int page_shift);
 void vunmap_range_noflush(unsigned long start, unsigned long end);
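As background on why the annotation is enough to catch such bugs: in the kernel, __must_check expands (roughly) to the compiler's warn_unused_result function attribute. The standalone sketch below, with a hypothetical do_map() standing in for vmap_pages_range_noflush(), can be compiled with GCC or Clang to see the resulting -Wunused-result warning:

#include <errno.h>

/* Standalone demo, not kernel code: mimic the kernel's definition of
 * __must_check as the warn_unused_result function attribute.
 */
#define __must_check __attribute__((warn_unused_result))

/* Hypothetical stand-in for vmap_pages_range_noflush(). */
static int __must_check do_map(void)
{
	return -EINVAL;		/* pretend the mapping failed */
}

int main(void)
{
	do_map();		/* warning: ignoring return value of 'do_map' */

	if (do_map())		/* checking the result satisfies __must_check */
		return 1;
	return 0;
}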