author     Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>  2012-03-22 00:34:14 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>     2012-03-22 01:54:59 +0100
commit     b69add218d32450d6604bc9080f6e33e19b06f5e (patch)
tree       67c91d3534c2f7bf01b03a49be7543abe77d16a6 /arch/x86/mm
parent     hugetlbfs: fix alignment of huge page requests (diff)
hugetlb: remove prev_vma from hugetlb_get_unmapped_area_topdown()
After looking up the vma which covers or follows the cached search
address, the following condition is always true:

        !prev_vma || (addr >= prev_vma->vm_end)

so we can stop checking the previous VMA altogether.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
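For reference, the invariant the patch relies on can be demonstrated with a
small userspace model. This is a sketch only, not kernel code: struct region
and region_find() below are illustrative stand-ins for vm_area_struct and
find_vma(), which returns the first area whose vm_end lies above the given
address; the predecessor of whatever that lookup returns must therefore end
at or below the address, which is exactly the condition the dropped prev_vma
check was testing.

/* Userspace sketch only -- models find_vma()'s contract, not kernel code. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* A sorted, non-overlapping set of mapped regions. */
struct region {
        unsigned long vm_start, vm_end;         /* covers [vm_start, vm_end) */
};

static struct region map[] = {
        { 0x1000, 0x3000 },
        { 0x5000, 0x6000 },
        { 0x9000, 0xa000 },
};
static const size_t nregions = sizeof(map) / sizeof(map[0]);

/*
 * Model of find_vma(): return the index of the first region whose vm_end
 * lies above addr (the region covering or following addr), or nregions
 * if no such region exists.
 */
static size_t region_find(unsigned long addr)
{
        size_t i;

        for (i = 0; i < nregions; i++)
                if (addr < map[i].vm_end)
                        return i;
        return nregions;
}

int main(void)
{
        unsigned long addr;

        for (addr = 0; addr < 0xc000; addr += 0x1000) {
                size_t i = region_find(addr);

                if (i == nregions || i == 0)
                        continue;
                /*
                 * The region preceding the one returned by the lookup
                 * always ends at or below addr, so the removed
                 * "!prev_vma || (addr >= prev_vma->vm_end)" test could
                 * never be false.
                 */
                assert(addr >= map[i - 1].vm_end);
        }
        printf("prev region always ends at or below the looked-up address\n");
        return 0;
}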
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/hugetlbpage.c | 18 +++++-------------
1 file changed, 5 insertions(+), 13 deletions(-)
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index c20e81c3425d..f6679a7fb8ca 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -308,7 +308,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 {
         struct hstate *h = hstate_file(file);
         struct mm_struct *mm = current->mm;
-        struct vm_area_struct *vma, *prev_vma;
+        struct vm_area_struct *vma;
         unsigned long base = mm->mmap_base;
         unsigned long addr = addr0;
         unsigned long largest_hole = mm->cached_hole_size;
@@ -340,22 +340,14 @@ try_again:
         if (!vma)
                 return addr;
 
-        /*
-         * new region fits between prev_vma->vm_end and
-         * vma->vm_start, use it:
-         */
-        prev_vma = vma->vm_prev;
-        if (addr + len <= vma->vm_start &&
-            (!prev_vma || (addr >= prev_vma->vm_end))) {
+        if (addr + len <= vma->vm_start) {
                 /* remember the address as a hint for next time */
                 mm->cached_hole_size = largest_hole;
                 return (mm->free_area_cache = addr);
-        } else {
+        } else if (mm->free_area_cache == vma->vm_end) {
                 /* pull free_area_cache down to the first hole */
-                if (mm->free_area_cache == vma->vm_end) {
-                        mm->free_area_cache = vma->vm_start;
-                        mm->cached_hole_size = largest_hole;
-                }
+                mm->free_area_cache = vma->vm_start;
+                mm->cached_hole_size = largest_hole;
         }
         /* remember the largest hole we saw so far */