author     Andi Kleen <ak@suse.de>    2008-07-24 06:27:43 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>    2008-07-24 19:47:17 +0200
commit     a137e1cc6d6e7d315fef03962a2a5a113348b13b (patch)
tree       b47e195c392abaa3640cc2f9187d99d58cee664a /mm
parent     hugetlb: multiple hstates for multiple page sizes (diff)
hugetlbfs: per mount huge page sizes
Add the ability to configure the hugetlb hstate used on a per mount basis.

- Add a new pagesize= option to the hugetlbfs mount that allows setting
  the page size
- This option causes the mount code to find the hstate corresponding to the
  specified size, and sets up a pointer to the hstate in the mount's
  superblock.
- Change the hstate accessors to use this information rather than the
  global_hstate they were using (requires a slight change in mm/memory.c
  so we don't NULL deref in the error-unmap path -- see comments).

[np: take hstate out of hugetlbfs inode and vma->vm_private_data]

Acked-by: Adam Litke <agl@us.ibm.com>
Acked-by: Nishanth Aravamudan <nacc@us.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
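As a usage sketch (the mount-side code lives outside mm/ and is not part of this diffstat): once the kernel has several hstates registered, each hugetlbfs mount can pick its own page size, for example:

    mount -t hugetlbfs -o pagesize=2M none /mnt/huge-2M
    mount -t hugetlbfs -o pagesize=1G none /mnt/huge-1G

The lookup the message describes can be pictured roughly as follows; for_each_hstate() and huge_page_size() are helpers from the parent patch, and the function name here is illustrative, not the exact fs/hugetlbfs code:

    /* Sketch: resolve a pagesize= mount option to a registered hstate. */
    static struct hstate *pagesize_to_hstate(unsigned long size)
    {
            struct hstate *h;

            for_each_hstate(h) {
                    if (huge_page_size(h) == size)
                            return h;       /* pointer saved in the superblock */
            }
            return NULL;                    /* unsupported size: fail the mount */
    }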
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c | 16
-rw-r--r--  mm/memory.c  | 18
2 files changed, 19 insertions(+), 15 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 82378d44a0c5..4cf7a90e9140 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1439,19 +1439,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct page *ref_page)
{
- /*
- * It is undesirable to test vma->vm_file as it should be non-null
- * for valid hugetlb area. However, vm_file will be NULL in the error
- * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
- * do_mmap_pgoff() nullifies vma->vm_file before calling this function
- * to clean up. Since no pte has actually been setup, it is safe to
- * do nothing in this case.
- */
- if (vma->vm_file) {
- spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
- __unmap_hugepage_range(vma, start, end, ref_page);
- spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
- }
+ spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+ __unmap_hugepage_range(vma, start, end, ref_page);
+ spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
}
/*
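Why the check can simply be dropped here: with per-mount hstates, hstate_vma() itself reaches the hstate through vma->vm_file, so a caller holding a NULL vm_file would oops before this helper could protect it; the test therefore has to live in the caller (see the mm/memory.c hunk below). The accessor wiring is done outside mm/ and so is absent from this diffstat, but it looks roughly like this sketch (simplified; field names approximate the 2.6.27-era VFS):

    /* Sketch: per-mount hstate accessors (simplified). */
    static inline struct hstate *hstate_inode(struct inode *i)
    {
            return HUGETLBFS_SB(i->i_sb)->hstate;   /* set at mount time */
    }

    static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
    {
            /* a NULL vma->vm_file would oops right here */
            return hstate_inode(vma->vm_file->f_path.dentry->d_inode);
    }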
diff --git a/mm/memory.c b/mm/memory.c
index c1c1d6d8c22b..02fc6b1047b0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -901,9 +901,23 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
}
if (unlikely(is_vm_hugetlb_page(vma))) {
- unmap_hugepage_range(vma, start, end, NULL);
- zap_work -= (end - start) /
+ /*
+ * It is undesirable to test vma->vm_file as it
+ * should be non-null for valid hugetlb area.
+ * However, vm_file will be NULL in the error
+ * cleanup path of do_mmap_pgoff. When
+ * hugetlbfs ->mmap method fails,
+ * do_mmap_pgoff() nullifies vma->vm_file
+ * before calling this function to clean up.
+ * Since no pte has actually been setup, it is
+ * safe to do nothing in this case.
+ */
+ if (vma->vm_file) {
+ unmap_hugepage_range(vma, start, end, NULL);
+ zap_work -= (end - start) /
pages_per_huge_page(hstate_vma(vma));
+ }
+
start = end;
} else
start = unmap_page_range(*tlbp, vma,
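Taken together, the two hunks move tolerance of a NULL vm_file to the one caller that can actually encounter it (the do_mmap_pgoff() error-cleanup path), while unmap_hugepage_range() now assumes a valid file. A hypothetical kerneldoc-style comment for the resulting contract (not part of the patch) might read:

    /*
     * unmap_hugepage_range - unmap a range of a hugetlb vma
     *
     * vma->vm_file must be non-NULL: both the i_mmap_lock taken here and,
     * in the caller, the huge page size obtained via hstate_vma() are
     * derived from it.  The do_mmap_pgoff() error path must test vm_file
     * before calling in (see the mm/memory.c hunk above).
     */
    void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                              unsigned long end, struct page *ref_page);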