author | Shiraz Saleem <shiraz.saleem@intel.com> | 2019-05-06 15:53:36 +0200 |
---|---|---|
committer | Jason Gunthorpe <jgg@mellanox.com> | 2019-05-06 18:08:11 +0200 |
commit | db6c6774af0d4861a7c5181ecc3c9ac320de46d9 (patch) | |
tree | e7a0a37618a222e11dc1ab401b8eaaf442a39ff3 /drivers/infiniband | |
parent | RDMA/bnxt_re: Use core helpers to get aligned DMA address (diff) | |
RDMA/umem: Remove hugetlb flag
The drivers i40iw and bnxt_re no longer depend on the hugetlb flag, so
remove this flag from the ib_umem structure.
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
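
For context, a driver that used to key its MR page size off umem->hugetlb now asks the core for the largest supported page size instead (see the parent commit converting bnxt_re to the core helpers). Below is a minimal sketch of that pattern, assuming the ib_umem_find_best_pgsz() helper added earlier in this series; the SZ_4K | SZ_2M bitmap and the wrapper function are illustrative, not taken from either driver:

```c
/*
 * Illustrative sketch only: choosing an MR page size without the removed
 * umem->hugetlb flag.  The SZ_4K | SZ_2M bitmap stands in for whatever
 * page sizes the hardware actually supports.
 */
#include <linux/sizes.h>
#include <rdma/ib_umem.h>

static unsigned long example_mr_page_size(struct ib_umem *umem, u64 virt_addr)
{
	/*
	 * ib_umem_find_best_pgsz() walks the pinned SG list and returns the
	 * largest page size from the bitmap that can map the MR, so
	 * hugetlb-backed memory is handled without a per-umem flag.
	 */
	return ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M, virt_addr);
}
```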
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/core/umem.c | 26
-rw-r--r-- | drivers/infiniband/core/umem_odp.c | 3
2 files changed, 1 insertion(+), 28 deletions(-)
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 145c31c530ae..0a23048db523 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -37,7 +37,6 @@
 #include <linux/sched/signal.h>
 #include <linux/sched/mm.h>
 #include <linux/export.h>
-#include <linux/hugetlb.h>
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <rdma/ib_umem_odp.h>
@@ -199,14 +198,12 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 	struct ib_ucontext *context;
 	struct ib_umem *umem;
 	struct page **page_list;
-	struct vm_area_struct **vma_list;
 	unsigned long lock_limit;
 	unsigned long new_pinned;
 	unsigned long cur_base;
 	struct mm_struct *mm;
 	unsigned long npages;
 	int ret;
-	int i;
 	unsigned long dma_attrs = 0;
 	struct scatterlist *sg;
 	unsigned int gup_flags = FOLL_WRITE;
@@ -264,23 +261,12 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 		return umem;
 	}
 
-	/* We assume the memory is from hugetlb until proved otherwise */
-	umem->hugetlb = 1;
-
 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
 	if (!page_list) {
 		ret = -ENOMEM;
 		goto umem_kfree;
 	}
 
-	/*
-	 * if we can't alloc the vma_list, it's not so bad;
-	 * just assume the memory is not hugetlb memory
-	 */
-	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
-	if (!vma_list)
-		umem->hugetlb = 0;
-
 	npages = ib_umem_num_pages(umem);
 	if (npages == 0 || npages > UINT_MAX) {
 		ret = -EINVAL;
@@ -312,7 +298,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 		ret = get_user_pages_longterm(cur_base,
 				     min_t(unsigned long, npages,
 					   PAGE_SIZE / sizeof (struct page *)),
-				     gup_flags, page_list, vma_list);
+				     gup_flags, page_list, NULL);
 		if (ret < 0) {
 			up_read(&mm->mmap_sem);
 			goto umem_release;
@@ -325,14 +311,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 			dma_get_max_seg_size(context->device->dma_device),
 			&umem->sg_nents);
 
-		/* Continue to hold the mmap_sem as vma_list access
-		 * needs to be protected.
-		 */
-		for (i = 0; i < ret && umem->hugetlb; i++) {
-			if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
-				umem->hugetlb = 0;
-		}
-
 		up_read(&mm->mmap_sem);
 	}
 
@@ -357,8 +335,6 @@ umem_release:
 vma:
 	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
 out:
-	if (vma_list)
-		free_page((unsigned long) vma_list);
 	free_page((unsigned long) page_list);
 umem_kfree:
 	if (ret) {
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 97219143f16f..c7226cf52acc 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -417,9 +417,6 @@ int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
 		h = hstate_vma(vma);
 		umem->page_shift = huge_page_shift(h);
 		up_read(&mm->mmap_sem);
-		umem->hugetlb = 1;
-	} else {
-		umem->hugetlb = 0;
 	}
 
 	mutex_init(&umem_odp->umem_mutex);