commit 0c4123e3fb82d6014d0a70b52eb38153f658541c (patch)
Author:    Alex Zhang <zhangalex@google.com>              2020-08-07 08:22:24 +0200
Committer: Linus Torvalds <torvalds@linux-foundation.org> 2020-08-07 20:33:26 +0200
tree   5c656de44bab39604512c9438a665f657b09aec6
parent mm: remove redundant check non_swap_entry() (diff)
mm/memory.c: make remap_pfn_range() reject unaligned addr
This function implicitly assumes that the addr passed in is page aligned. A non-page-aligned addr could ultimately cause a kernel bug in remap_pte_range(), as the loop's exit condition may never be satisfied. This patch documents the alignment requirement and adds an explicit check for it.

Signed-off-by: Alex Zhang <zhangalex@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20200617233512.177519-1-zhangalex@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
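The hazard is easiest to see in the loop arithmetic: remap_pte_range() steps addr forward one PAGE_SIZE at a time and stops only when addr equals end, but end is page aligned, so an addr carrying a sub-page offset steps past end without ever equaling it. Below is a minimal user-space sketch of that arithmetic, not kernel code; the 0x123 offset, the 4096-byte PAGE_SIZE, and the safety cap are illustrative assumptions.

/*
 * Sketch of the never-terminating exit condition described above.
 * Mimics the "addr != end" test used by the kernel's pte loop.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long addr = 0x10000UL + 0x123;        /* unaligned start */
	unsigned long end  = 0x10000UL + 2 * PAGE_SIZE; /* page-aligned end */
	int iterations = 0;

	do {
		/* the kernel would install one PTE per iteration here */
		addr += PAGE_SIZE;
		if (++iterations > 4) {                /* demo-only safety cap */
			printf("addr (0x%lx) stepped past end (0x%lx); "
			       "addr != end never became false\n", addr, end);
			return 1;
		}
	} while (addr != end);

	printf("loop exited normally after %d iterations\n", iterations);
	return 0;
}

With an aligned start address the loop exits after exactly size / PAGE_SIZE iterations; with the unaligned start it overshoots end on the second step and, absent the cap, would spin forever.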
 mm/memory.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/mm/memory.c b/mm/memory.c
index f4f27b9d48c5..c39a13b09602 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2082,7 +2082,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
 /**
  * remap_pfn_range - remap kernel memory to userspace
  * @vma: user vma to map to
- * @addr: target user address to start at
+ * @addr: target page aligned user address to start at
  * @pfn: page frame number of kernel physical memory address
  * @size: size of mapping area
  * @prot: page protection flags for this mapping
@@ -2101,6 +2101,9 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	unsigned long remap_pfn = pfn;
 	int err;
 
+	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
+		return -EINVAL;
+
 	/*
 	 * Physically remapped pages are special. Tell the
 	 * rest of the world about it:
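For context on how callers reach the new check: a driver's .mmap handler conventionally passes vma->vm_start, which the core mmap() path already guarantees to be page aligned, so the WARN_ON_ONCE() chiefly catches callers that compute addr by hand. A sketch of the conventional call follows; the demo_ names, DEMO_PHYS_BASE, and the bounds check are hypothetical, not taken from the patch.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>

#define DEMO_PHYS_BASE   0xfd000000UL	/* hypothetical MMIO base */
#define DEMO_REGION_SIZE 0x10000UL	/* hypothetical region size */

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > DEMO_REGION_SIZE)
		return -EINVAL;

	/*
	 * vma->vm_start is page aligned by the mmap() path, so the
	 * PAGE_ALIGNED() check above is satisfied. A hand-adjusted
	 * address such as vma->vm_start + 0x10 now fails cleanly
	 * with -EINVAL instead of looping in remap_pte_range().
	 */
	return remap_pfn_range(vma, vma->vm_start,
			       DEMO_PHYS_BASE >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.mmap  = demo_mmap,
};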