Diffstat (limited to 'drivers/gpu/drm/bochs/bochs_mm.c')
 drivers/gpu/drm/bochs/bochs_mm.c | 69
 1 file changed, 59 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 0980411e41bf..49463348a07a 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -210,33 +210,28 @@ static void bochs_ttm_placement(struct bochs_bo *bo, int domain)
 	bo->placement.num_busy_placement = c;
 }
 
-static inline u64 bochs_bo_gpu_offset(struct bochs_bo *bo)
-{
-	return bo->bo.offset;
-}
-
-int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
+int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag)
 {
 	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 
 	if (bo->pin_count) {
 		bo->pin_count++;
-		if (gpu_addr)
-			*gpu_addr = bochs_bo_gpu_offset(bo);
 		return 0;
 	}
 
 	bochs_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+	ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
+	if (ret)
+		return ret;
 	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
+	ttm_bo_unreserve(&bo->bo);
 	if (ret)
 		return ret;
 
 	bo->pin_count = 1;
-	if (gpu_addr)
-		*gpu_addr = bochs_bo_gpu_offset(bo);
 	return 0;
 }
 
@@ -256,7 +251,11 @@ int bochs_bo_unpin(struct bochs_bo *bo)
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
 
+	ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
+	if (ret)
+		return ret;
 	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
+	ttm_bo_unreserve(&bo->bo);
 	if (ret)
 		return ret;
 
@@ -396,3 +395,53 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
 	drm_gem_object_put_unlocked(obj);
 	return 0;
 }
+
+/* ---------------------------------------------------------------------- */
+
+int bochs_gem_prime_pin(struct drm_gem_object *obj)
+{
+	struct bochs_bo *bo = gem_to_bochs_bo(obj);
+
+	return bochs_bo_pin(bo, TTM_PL_FLAG_VRAM);
+}
+
+void bochs_gem_prime_unpin(struct drm_gem_object *obj)
+{
+	struct bochs_bo *bo = gem_to_bochs_bo(obj);
+
+	bochs_bo_unpin(bo);
+}
+
+void *bochs_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	struct bochs_bo *bo = gem_to_bochs_bo(obj);
+	bool is_iomem;
+	int ret;
+
+	ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM);
+	if (ret)
+		return NULL;
+	ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+	if (ret) {
+		bochs_bo_unpin(bo);
+		return NULL;
+	}
+	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+}
+
+void bochs_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	struct bochs_bo *bo = gem_to_bochs_bo(obj);
+
+	ttm_bo_kunmap(&bo->kmap);
+	bochs_bo_unpin(bo);
+}
+
+int bochs_gem_prime_mmap(struct drm_gem_object *obj,
+			 struct vm_area_struct *vma)
+{
+	struct bochs_bo *bo = gem_to_bochs_bo(obj);
+
+	bo->gem.vma_node.vm_node.start = bo->bo.vma_node.vm_node.start;
+	return drm_gem_prime_mmap(obj, vma);
+}
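
For context: the new bochs_gem_prime_* helpers only take effect once they are wired into the driver's struct drm_driver in bochs_drv.c, which lies outside this diffstat. The snippet below is a hedged sketch of what that companion hookup would typically look like for a drm_driver of this kernel era; the surrounding fields are abbreviated and the exact contents of the real bochs_drv.c change are not shown here.

/*
 * Sketch only, not part of the diff above: typical prime hookup in
 * bochs_drv.c. All callback fields named here existed in struct
 * drm_driver at this point in the kernel; unrelated fields are elided.
 */
static struct drm_driver bochs_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
	/* ... fops, name/desc/date, dumb buffer handlers ... */
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_export	= drm_gem_prime_export,
	.gem_prime_import	= drm_gem_prime_import,
	.gem_prime_pin		= bochs_gem_prime_pin,
	.gem_prime_unpin	= bochs_gem_prime_unpin,
	.gem_prime_vmap		= bochs_gem_prime_vmap,
	.gem_prime_vunmap	= bochs_gem_prime_vunmap,
	.gem_prime_mmap		= bochs_gem_prime_mmap,
};

Note the design choice visible in bochs_gem_prime_vmap above: it pins the buffer object into VRAM before calling ttm_bo_kmap, so the mapping stays valid (the BO cannot be evicted) until bochs_gem_prime_vunmap drops the kmap and the pin again.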