author	Hugh Dickins <hugh@veritas.com>	2005-10-30 02:16:30 +0100
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 05:40:41 +0100
commit	508034a32b819a2d40aa7ac0dbc8cd2e044c2de6 (patch)
tree	906a8f0095af24f403b30d649d3ec1ffb4ff2f50 /mm/hugetlb.c
parent	[PATCH] mm: unlink vma before pagetables (diff)
download	linux-508034a32b819a2d40aa7ac0dbc8cd2e044c2de6.tar.xz
	linux-508034a32b819a2d40aa7ac0dbc8cd2e044c2de6.zip
[PATCH] mm: unmap_vmas with inner ptlock
Remove the page_table_lock from around the calls to unmap_vmas, and replace
the pte_offset_map in zap_pte_range by pte_offset_map_lock: all callers are
now safe to descend without page_table_lock.

Don't attempt fancy locking for hugepages, just take page_table_lock in
unmap_hugepage_range.  Which makes zap_hugepage_range, and the hugetlb test
in zap_page_range, redundant: unmap_vmas calls unmap_hugepage_range anyway.
Nor does unmap_vmas have much use for its mm arg now.

The tlb_start_vma and tlb_end_vma in unmap_page_range are now called without
page_table_lock: if they're implemented at all, they typically come down to
flush_cache_range (usually done outside page_table_lock) and flush_tlb_range
(which we already audited for the mprotect case).

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
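The pte_offset_map_lock side of this change lands in mm/memory.c and is outside
this diffstat. As a rough sketch only (simplified loop and hypothetical function
name, not the upstream hunk), the pattern the message describes looks like this:

static void zap_ptes_sketch(struct mm_struct *mm, pmd_t *pmd,
			    unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	pte_t *pte;

	/*
	 * Before: the caller had to hold mm->page_table_lock and used
	 *	pte = pte_offset_map(pmd, addr);
	 * After: take the per-page-table lock here instead, so callers such
	 * as unmap_vmas() can descend without page_table_lock.
	 */
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		/* ... clear one pte, adjust rss, release the page ... */
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}

Hugetlb pages skip this fine-grained scheme entirely: as the hunks below show,
unmap_hugepage_range simply takes page_table_lock itself for the whole range.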
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	12
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ea0826ff2663..f29b7dc02c39 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -314,6 +314,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	BUG_ON(start & ~HPAGE_MASK);
 	BUG_ON(end & ~HPAGE_MASK);
 
+	spin_lock(&mm->page_table_lock);
+
 	/* Update high watermark before we lower rss */
 	update_hiwater_rss(mm);
 
@@ -333,17 +335,9 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 		put_page(page);
 		add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
 	}
-	flush_tlb_range(vma, start, end);
-}
 
-void zap_hugepage_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long length)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	spin_lock(&mm->page_table_lock);
-	unmap_hugepage_range(vma, start, start + length);
 	spin_unlock(&mm->page_table_lock);
+	flush_tlb_range(vma, start, end);
 }
 
 int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)