Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r-- | drivers/gpu/drm/msm/msm_gem.c | 68
1 file changed, 28 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b6ac27e31929..d8bc59c7e261 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -225,16 +225,14 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	/* We don't use vmf->pgoff since that has the fake offset: */
-	pgoff = ((unsigned long)vmf->virtual_address -
-			vma->vm_start) >> PAGE_SHIFT;
+	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	pfn = page_to_pfn(pages[pgoff]);
 
-	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 			pfn, pfn << PAGE_SHIFT);
 
-	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
-			__pfn_to_pfn_t(pfn, PFN_DEV));
+	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
@@ -296,12 +294,8 @@ put_iova(struct drm_gem_object *obj)
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
-		struct msm_mmu *mmu = priv->mmus[id];
-		if (mmu && msm_obj->domain[id].iova) {
-			uint32_t offset = msm_obj->domain[id].iova;
-			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
-			msm_obj->domain[id].iova = 0;
-		}
+		msm_gem_unmap_vma(priv->aspace[id],
+				&msm_obj->domain[id], msm_obj->sgt);
 	}
 }
 
@@ -313,7 +307,7 @@ put_iova(struct drm_gem_object *obj)
  * the refcnt counter needs to be atomic_t.
  */
 int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
-		uint32_t *iova)
+		uint64_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret = 0;
@@ -326,16 +320,8 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 		return PTR_ERR(pages);
 
 	if (iommu_present(&platform_bus_type)) {
-		struct msm_mmu *mmu = priv->mmus[id];
-		uint32_t offset;
-
-		if (WARN_ON(!mmu))
-			return -EINVAL;
-
-		offset = (uint32_t)mmap_offset(obj);
-		ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
-				obj->size, IOMMU_READ | IOMMU_WRITE);
-		msm_obj->domain[id].iova = offset;
+		ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
+				msm_obj->sgt, obj->size >> PAGE_SHIFT);
 	} else {
 		msm_obj->domain[id].iova = physaddr(obj);
 	}
@@ -348,7 +334,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 }
 
 /* get iova, taking a reference.  Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;
@@ -370,7 +356,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
 /* get iova without taking a reference, used in places where you have
  * already done a 'msm_gem_get_iova()'.
  */
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint64_t msm_gem_iova(struct drm_gem_object *obj, int id)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	WARN_ON(!msm_obj->domain[id].iova);
@@ -521,7 +507,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct reservation_object_list *fobj;
-	struct fence *fence;
+	struct dma_fence *fence;
 	int i, ret;
 
 	if (!exclusive) {
@@ -540,7 +526,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 	fence = reservation_object_get_excl(msm_obj->resv);
 	/* don't need to wait on our own fences, since ring is fifo */
 	if (fence && (fence->context != fctx->context)) {
-		ret = fence_wait(fence, true);
+		ret = dma_fence_wait(fence, true);
 		if (ret)
 			return ret;
 	}
@@ -553,7 +539,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 		fence = rcu_dereference_protected(fobj->shared[i],
 						reservation_object_held(msm_obj->resv));
 		if (fence->context != fctx->context) {
-			ret = fence_wait(fence, true);
+			ret = dma_fence_wait(fence, true);
 			if (ret)
 				return ret;
 		}
@@ -563,7 +549,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 }
 
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
+		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
@@ -616,10 +602,10 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
 }
 
 #ifdef CONFIG_DEBUG_FS
-static void describe_fence(struct fence *fence, const char *type,
+static void describe_fence(struct dma_fence *fence, const char *type,
 		struct seq_file *m)
 {
-	if (!fence_is_signaled(fence))
+	if (!dma_fence_is_signaled(fence))
 		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
 				fence->ops->get_driver_name(fence),
 				fence->ops->get_timeline_name(fence),
@@ -631,9 +617,11 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct reservation_object *robj = msm_obj->resv;
 	struct reservation_object_list *fobj;
-	struct fence *fence;
+	struct msm_drm_private *priv = obj->dev->dev_private;
+	struct dma_fence *fence;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
 	const char *madv;
+	unsigned id;
 
 	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 
@@ -650,10 +638,15 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 		break;
 	}
 
-	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
+	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
 			obj->name, obj->refcount.refcount.counter,
-			off, msm_obj->vaddr, obj->size, madv);
+			off, msm_obj->vaddr);
+
+	for (id = 0; id < priv->num_aspaces; id++)
+		seq_printf(m, " %08llx", msm_obj->domain[id].iova);
+
+	seq_printf(m, " %zu%s\n", obj->size, madv);
 
 	rcu_read_lock();
 	fobj = rcu_dereference(robj->fence);
@@ -761,7 +754,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
-	unsigned sz;
 	bool use_vram = false;
 
 	switch (flags & MSM_BO_CACHE_MASK) {
@@ -783,16 +775,12 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	if (WARN_ON(use_vram && !priv->vram.size))
 		return -EINVAL;
 
-	sz = sizeof(*msm_obj);
-	if (use_vram)
-		sz += sizeof(struct drm_mm_node);
-
-	msm_obj = kzalloc(sz, GFP_KERNEL);
+	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
 	if (!msm_obj)
 		return -ENOMEM;
 
 	if (use_vram)
-		msm_obj->vram_node = (void *)&msm_obj[1];
+		msm_obj->vram_node = &msm_obj->domain[0].node;
 
 	msm_obj->flags = flags;
 	msm_obj->madv = MSM_MADV_WILLNEED;
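
For readers tracking the API change above: the sketch below illustrates how a caller adapts to the iova widening from uint32_t to uint64_t. It is a hypothetical example, not part of the patch; only the msm_gem_get_iova() signature and the "taking a reference / matching put" contract come from the diff, while the function name, the id value, and the logging are illustrative assumptions.

	/* Hypothetical caller sketch: pin a GEM buffer object in address
	 * space "id" and log its GPU address after the 64-bit widening.
	 */
	#include <linux/printk.h>
	#include <drm/drm_gem.h>
	#include "msm_drv.h"	/* assumed location of the iova prototypes */

	static int example_pin_and_log(struct drm_gem_object *obj, int id)
	{
		uint64_t iova;	/* was uint32_t before this change */
		int ret;

		/* takes a reference; pair with a matching put */
		ret = msm_gem_get_iova(obj, id, &iova);
		if (ret)
			return ret;

		/* iova is 64-bit now: print with %llx, not %x */
		pr_debug("BO %u mapped at iova %016llx\n", obj->name, iova);

		msm_gem_put_iova(obj, id);
		return 0;
	}

Note the printk format change: callers that kept a %08x format after this widening would truncate the address on targets whose IOMMU maps buffers above 4 GiB, which is precisely what the uint64_t conversion is meant to allow.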